prompt | completion | api
---|---|---
stringlengths 19–879k | stringlengths 3–53.8k | stringlengths 8–59
import iris
import iris.plot as iplt
import iris.quickplot as qplt
import numpy as np
import matplotlib.pyplot as plt
import fnmatch
import os
import matplotlib.dates as mdates
import sys
from importlib import reload  # reload() is not a builtin in Python 3
reload(sys)
sys.getdefaultencoding()
import numpy.ma as ma
import cartopy.crs as ccrs
import matplotlib
from tools import rotate_data
def load_files(case):
if case == 'CS1':
os.chdir('/data/clivarm/wip/ellgil82/May_2016/Re-runs/CS1/') # path to data
filepath = '/data/clivarm/wip/ellgil82/May_2016/Re-runs/CS1/'
case_date = '20160507T1200Z'
elif case == 'CS2':
os.chdir('/data/clivarm/wip/ellgil82/May_2016/Re-runs/CS2/')
filepath = '/data/clivarm/wip/ellgil82/May_2016/Re-runs/CS2/'
case_date = '20160522T1200Z'
os.chdir(filepath)
surf = filepath+case_date+'_Peninsula_km4p0_ctrl_pa000.pp'
print ('importing cubes...')
T_air = iris.load_cube(surf, 'air_temperature')
T_surf = iris.load_cube(surf, 'surface_temperature')
lsm = iris.load_cube(surf, 'land_binary_mask')
orog = iris.load_cube(surf, 'surface_altitude')
P = iris.load_cube(surf, 'surface_air_pressure')
P.convert_units('hPa')
T_air.convert_units('celsius')
T_surf.convert_units('celsius')
## Iris v1.11 version
u_wind = iris.load_cube(surf, 'x_wind')
v_wind = iris.load_cube(surf, 'y_wind')
v_wind = v_wind[:,1:,:]
## Rotate projection
for var in [T_air, T_surf, u_wind, v_wind, P]:
real_lon, real_lat = rotate_data(var,1,2)
for var in [lsm, orog]:
real_lon, real_lat = rotate_data(var, 0, 1)
P_ma = np.ma.masked_where(lsm.data == 1, P[0,:,:].data) #orog.data > 0 &
return u_wind[0,:,:], v_wind[0,:,:], T_air[0,:,:], T_surf[0,:,:], P_ma, lsm, real_lat, real_lon, orog
u_wind, v_wind, T_air, T_surf, P, lsm, real_lat, real_lon, orog = load_files('CS1')
def shiftedColorMap(cmap, min_val, max_val, name, var):
epsilon = 0.001
start, stop = 0.00, 1.0 #0.15, 0.85
min_val, max_val = min(0.0, min_val), max(0.0, max_val)
midpoint = 1.0 - max_val / (max_val + abs(min_val))
cdict = {'red': [], 'green': [], 'blue': [], 'alpha': []}
# regular index to compute the colors
reg_index = np.linspace(start, stop, 257)
# shifted index to match the data
shift_index = np.hstack([np.linspace(0.0, midpoint, 128, endpoint=False), np.linspace(midpoint, 1.0, 129, endpoint=True)])
for ri, si in zip(reg_index, shift_index):
if abs(si - midpoint) < epsilon:
r, g, b, a = cmap(0.5) # 0.5 = original midpoint.
else:
r, g, b, a = cmap(ri)
cdict['red'].append((si, r, r))
cdict['green'].append((si, g, g))
cdict['blue'].append((si, b, b))
cdict['alpha'].append((si, a, a))
newcmap = matplotlib.colors.LinearSegmentedColormap(name, cdict)
plt.register_cmap(cmap=newcmap)
return newcmap
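# Usage sketch (illustrative values, not taken from the original script): register a
# colormap whose neutral colour sits at 0 degC for plotting the surface-temperature
# field. Note that the 'var' argument is accepted but unused inside shiftedColorMap.
bwr_zero = shiftedColorMap(cmap=matplotlib.cm.bwr, min_val=-30., max_val=10., name='bwr_zero', var=T_surf.data)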
## Caption:
def plot_synop():
fig = plt.figure(figsize=(10, 12))
ax = fig.add_axes([0.18, 0.25, 0.75, 0.63], frameon=False)#, projection=ccrs.PlateCarree())#
ax.tick_params(which='both', axis='both', labelsize=34, labelcolor='dimgrey', pad=10, size=0, tick1On=False, tick2On=False)
PlotLonMin = np.min(real_lon)
PlotLonMax = np.max(real_lon)
PlotLatMin = np.min(real_lat)
PlotLatMax = np.max(real_lat)
XTicks = np.linspace(PlotLonMin, PlotLonMax, 3)
XTickLabels = [None] * len(XTicks)
for i, XTick in enumerate(XTicks):
if XTick < 0:
XTickLabels[i] = '{:.0f}{:s}'.format(np.abs(XTick), ...)
import sys
import numpy as np
import h5py
import matplotlib.pyplot as plt
question = sys.argv[1]
def berkan_ozdamar_21602353_hw2(question):
if question == '1':
import numpy as np
import h5py
import matplotlib.pyplot as plt
# Part A
f = h5py.File('assign2_data1.h5', 'r')
dataKeys = list(f.keys())
print('The data keys are:' + str(dataKeys))
# Gathering the train images, test images, train labels and test labels.
testims = f['testims']
testlbls = f['testlbls']
trainims = f['trainims']
trainlbls = f['trainlbls']
print('The size of testims is: ' + str(np.shape(testims)))
print('The size of testlbls is: ' + str(np.shape(testlbls)))
print('The size of trainims is: ' + str(np.shape(trainims)))
print('The size of trainlbls is: ' + str(np.shape(trainlbls)))
figureNum = 0
plt.figure(figureNum)
plt.title('Example of a Cat Image')
plt.imshow(trainims[0].T, cmap='gray')
plt.show()
figureNum = 0
plt.figure(figureNum)
plt.title('Example of a Car Image')
plt.imshow(trainims[1500].T, cmap='gray')
plt.show()
class HiddenLayer:
def __init__(self, neuronNum, neuronSize, mean, std):
'''
This class creates a hidden layer for the neural network.
Weights and biases are initialized from a random Gaussian distribution.
INPUTS:
neuronNum : neuronNum is the number of inputs to each neuron in this layer (the previous layer's size).
neuronSize : neuronSize is the number of neurons in a hidden layer.
mean : mean for Gaussian distribution.
std : Standard deviation for Gaussian distribution.
RETURNS:
'''
np.random.seed(15)
self.weights = np.random.normal(loc=mean, scale=std, size=(neuronNum, neuronSize))
self.bias = np.random.normal(loc=mean, scale=std, size=(1, neuronSize))
self.Z = None
self.A = None
self.grad = None
self.dB = None
self.dW = None
self.error = None
self.momentum_dw = 0
self.momentum_db = 0
class MLP:
def __init__(self, momentum=False, momentumCoef=0):
'''
This class creates a multilayer perceptron network.
INPUTS:
momentum : boolean flag indicating whether momentum-based weight updates are used
momentumCoef : Coefficient of momentum learning
RETURNS:
'''
self.momentum = momentum
self.momentumCoef = momentumCoef
self.layers = list()
self.batchSize = 0
def addLayer(self, layer):
'''
This function adds a HiddenLayer class to the network.
INPUTS:
layer : layer is an instance of HiddenLayer class
RETURNS:
'''
self.layers.append(layer)
def tanh(self, x):
'''
This function is the hyperbolic tangent for the activation functions of each neuron.
INPUTS:
x : x is the weighted sum which will be pushed to activation function.
RETURNS:
result : result is the hyperbolic tangent of the input x.
'''
result = 2 / (1 + np.exp(-2 * x)) - 1
return result
def der_tanh(self, x):
'''
This function is the derivative hyperbolic tangent. This function will be used in backpropagation.
INPUTS:
x : x is the input.
RETURNS:
result : result is the derivative of hyperbolic tangent of the input x.
'''
result = 1 - self.tanh(x) ** 2
return result
def MSE(self, y, y_pred):
'''
MSE is the loss function for the network.
INPUTS:
y : y is the labels for our data.
y_pred : y_pred is the network's prediction.
RETURNS:
loss : loss is the mean squared error between y and y_pred.
'''
error = y - y_pred
loss = np.mean(error ** 2)
return loss
def der_MSE(self, y, y_pred):
'''
der_MSE is the derivative of loss function for the network.
This function will be used for backpropagation.
INPUTS:
y : y is the labels for our data.
y_pred : y_pred is the network's prediction.
RETURNS:
result : result is the derivative of the MSE between y and y_pred.
'''
result = y_pred - y
return result
def MCE(self, y, y_pred):
'''
MCE returns the classification accuracy of the network (the complement of the mean classification error), as a percentage.
INPUTS:
y : y is the labels for our data.
y_pred : y_pred is the network's prediction.
RETURNS:
: returns the accuracy between y and y_pred.
'''
count = 0
for i in range(len(y)):
if (y[i] == y_pred[i]):
count += 1
return 100 * (count / len(y))
def forward(self, data):
'''
forward function is the forward propagation.
INPUTS:
data : data is the input which will pushed to forward propagation.
RETURNS:
: returns the prediction of the network.
'''
layerSize = np.shape(self.layers)[0]
for i in range(layerSize):
# 1st Hidden Layer
if (i == 0):
self.layers[i].A = data.dot(self.layers[i].weights) + self.layers[i].bias
self.layers[i].Z = self.tanh(self.layers[i].A)
# Other Hidden Layers and Output Layer
else:
self.layers[i].A = (self.layers[i - 1].Z).dot(self.layers[i].weights) + self.layers[i].bias
self.layers[i].Z = self.tanh(self.layers[i].A)
return self.layers[-1].Z
def back_propagation(self, data, label):
'''
back_propagation function is the back propagation algorithm for weight and bias updates.
back_propagation function first calls the forward function to predict the output of the network, which is y_pred.
INPUTS:
data : data is the input.
label : label is the labels of the data.
RETURNS:
'''
layerSize = np.shape(self.layers)[0]
y_pred = self.forward(data)
for i in range(layerSize)[::-1]:
# Output Layer
if (i == layerSize - 1):
self.layers[i].error = self.der_MSE(label, y_pred)
self.layers[i].error = np.array(self.layers[i].error).reshape(-1, 1)
self.layers[i].grad = (self.layers[i].error) * (self.der_tanh(self.layers[i].A))
self.layers[i].dW = (self.layers[i - 1].Z).T.dot(self.layers[i].grad)
self.layers[i].dB = np.sum(self.layers[i].grad, axis=0, keepdims=True)
# 1st Hidden Layer
elif (i == 0):
self.layers[i].error = (self.layers[i + 1].grad).dot(self.layers[i + 1].weights.T)
self.layers[i].grad = (self.layers[i].error) * self.der_tanh(self.layers[i].A)
self.layers[i].dW = data.T.dot(self.layers[i].grad)
self.layers[i].dB = np.sum(self.layers[i].grad, axis=0, keepdims=True)
# Other Hidden Layers
else:
self.layers[i].error = (self.layers[i + 1].grad).dot(self.layers[i + 1].weights.T)
self.layers[i].grad = (self.layers[i].error) * self.der_tanh(self.layers[i].A)
self.layers[i].dW = (self.layers[i - 1].Z).T.dot(self.layers[i].grad)
self.layers[i].dB = np.sum(self.layers[i].grad, axis=0, keepdims=True)
def update_weights(self, data, label, learningRate):
'''
update_weights function updates the weights with the gradients found with back_propagation.
INPUTS:
data : data is the input.
label : label is the labels of the data.
learningRate : learningRate is the coefficient for the weight update.
RETURNS:
'''
layerSize = np.shape(self.layers)[0]
self.back_propagation(data, label)
# If momentum is used.
if (self.momentum == True):
for i in range(layerSize):
self.layers[i].momentum_dw = self.layers[i].dW + (
self.momentumCoef * self.layers[i].momentum_dw)
self.layers[i].momentum_db = self.layers[i].dB + (
self.momentumCoef * self.layers[i].momentum_db)
self.layers[i].weights -= (learningRate * self.layers[i].momentum_dw) / self.batchSize
self.layers[i].bias -= (learningRate * self.layers[i].momentum_db) / self.batchSize
# If momentum is not used.
else:
for i in range(layerSize):
self.layers[i].weights -= (learningRate * self.layers[i].dW) / self.batchSize
self.layers[i].bias -= (learningRate * self.layers[i].dB) / self.batchSize
def predict(self, y_pred):
'''
predict function converts the network's raw output y_pred into a class label.
INPUTS:
y_pred : MLP's output.
RETURNS:
: returns the label for prediction of the network.
'''
return np.where(y_pred >= 0, 1, -1)
def trainNetwork(self, data, label, testData, testLabel, learningRate, batchNum, epoch):
'''
trainNetwork function calls the update_weights function to train the network over mini-batches
for a given number of epochs.
INPUTS:
data : data is the training data.
label : label is the labels of the data.
testData : testData is the test data.
testLabel : testLabel is the labels of the testData.
learningRate : learningRate is the coefficient for the weight update.
batchNum : batchNum is the number of mini-batches.
epoch : Number of times the network train the whole data.
RETURNS:
MSE_loss : MSE loss of the training data.
MCE_loss : MCE loss of the training data.
test_MSE : MSE loss of the test data.
test_MCE : MCE loss of the test data.
'''
MSE_loss = list()
MCE_loss = list()
test_MSE = list()
test_MCE = list()
np.random.seed(7)
for i in range(epoch):
randomIndexes = np.random.permutation(len(label))
data = data[randomIndexes]
label = label[randomIndexes]
batchLength = len(label) / batchNum
self.batchSize = batchLength
for j in range(batchNum):
start = int(batchLength * j)
end = int(batchLength * (j + 1))
self.update_weights(data[start:end], label[start:end], learningRate)
y_pred = self.forward(data)
loss = self.MSE(label, y_pred)
MSE_loss.append(loss)
loss_MCE = self.MCE(label, self.predict(y_pred))
MCE_loss.append(loss_MCE)
y_pred_test = self.forward(testData)
test_loss = self.MSE(testLabel, y_pred_test)
test_MSE.append(test_loss)
test_loss_MCE = self.MCE(testLabel, self.predict(y_pred_test))
test_MCE.append(test_loss_MCE)
return MSE_loss, MCE_loss, test_MSE, test_MCE
trainimages = np.asarray(trainims)
testimages = np.asarray(testims)
trainlabels = np.asarray(trainlbls)
testlabels = np.asarray(testlbls)
trainlabels = np.where(trainlabels == 0, -1, 1)
testlabels = np.where(testlabels == 0, -1, 1)
trainlabels = trainlabels.reshape(-1, 1)
testlabels = testlabels.reshape(-1, 1)
train_img_flat = trainimages.reshape(1900, 32 ** 2)
test_img_flat = testimages.reshape(1000, 32 ** 2)
# learningRate = np.arange(0.1, 0.5, 0.05)
# N_layer = np.arange(10,20,2)
# best_lr = 0
# best_n = 0
# mse_best = np.inf
# for n in N_layer:
# print(n)
# neuralNet = MLP()
# neuralNet.addLayer(HiddenLayer(32**2, n, 0, 0.02))
# neuralNet.addLayer(HiddenLayer(n, 1, 0, 0.02))
# for lr in learningRate:
# mse_loss, mce_loss, test_mse, test_mce = neuralNet.trainNetwork(train_img_flat/255, trainlabels, test_img_flat/255, testlabels, lr, 50, 300)
# if(mse_loss[-1] < mse_best):
# best_lr = lr
# best_n = n
# mse_best = mse_loss[-1]
# print("Best Learning Rate: " +str(best_lr))
# print("Best Number of Hidden Neuron: " +str(best_n))
best_lr = 0.35
best_n = 20
neuralNet = MLP()
neuralNet.addLayer(HiddenLayer(32 ** 2, best_n, 0, 0.02))
neuralNet.addLayer(HiddenLayer(best_n, 1, 0, 0.02))
mse_loss, mce_loss, test_mse, test_mce = neuralNet.trainNetwork(train_img_flat / 255, trainlabels,
test_img_flat / 255, testlabels, best_lr, 50,
250)
figureNum = 0
plt.figure(figureNum)
plt.plot(mse_loss)
plt.title('MSE Over Training Data')
plt.xlabel('Epoch')
plt.ylabel('MSE')
plt.show()
figureNum += 1
plt.figure(figureNum)
plt.plot(mce_loss)
plt.title('MCE Over Training Data')
plt.show()
figureNum += 1
plt.figure(figureNum)
plt.plot(test_mse)
plt.title('MSE Over Test Data')
plt.show()
figureNum += 1
plt.figure(figureNum)
plt.plot(test_mce)
plt.title('MCE Over Test Data')
plt.show()
print(np.argmax(test_mce))
# Part C
best_lr = 0.35
high_n = 200
neuralNet = MLP()
neuralNet.addLayer(HiddenLayer(32 ** 2, high_n, 0, 0.02))
neuralNet.addLayer(HiddenLayer(high_n, 1, 0, 0.02))
mse_loss_highN, mce_loss_highN, test_mse_highN, test_mce_highN = neuralNet.trainNetwork(train_img_flat / 255,
trainlabels,
test_img_flat / 255,
testlabels, best_lr, 50,
250)
best_lr = 0.35
low_n = 4
neuralNet = MLP()
neuralNet.addLayer(HiddenLayer(32 ** 2, low_n, 0, 0.02))
neuralNet.addLayer(HiddenLayer(low_n, 1, 0, 0.02))
mse_loss_lowN, mce_loss_lowN, test_mse_lowN, test_mce_lowN = neuralNet.trainNetwork(train_img_flat / 255,
trainlabels,
test_img_flat / 255,
testlabels, best_lr, 50,
250)
figureNum += 1
plt.figure(figureNum)
plt.plot(mse_loss)
plt.plot(mse_loss_highN)
plt.plot(mse_loss_lowN)
plt.title('MSE Over Training Data')
plt.xlabel('Epoch')
plt.ylabel('MSE')
plt.legend(
["Hidden Neuron = " + str(best_n), "Hidden Neuron = " + str(high_n), "Hidden Neuron = " + str(low_n)])
plt.grid()
plt.show()
figureNum += 1
plt.figure(figureNum)
plt.plot(mce_loss)
plt.plot(mce_loss_highN)
plt.plot(mce_loss_lowN)
plt.title('MCE Over Training Data')
plt.xlabel('Epoch')
plt.ylabel('MCE')
plt.legend(
["Hidden Neuron = " + str(best_n), "Hidden Neuron = " + str(high_n), "Hidden Neuron = " + str(low_n)])
plt.grid()
plt.show()
figureNum += 1
plt.figure(figureNum)
plt.plot(test_mse)
plt.plot(test_mse_highN)
plt.plot(test_mse_lowN)
plt.title('MSE Over Test Data')
plt.xlabel('Epoch')
plt.ylabel('MSE')
plt.legend(
["Hidden Neuron = " + str(best_n), "Hidden Neuron = " + str(high_n), "Hidden Neuron = " + str(low_n)])
plt.grid()
plt.show()
figureNum += 1
plt.figure(figureNum)
plt.plot(test_mce)
plt.plot(test_mce_highN)
plt.plot(test_mce_lowN)
plt.title('MCE Over Test Data')
plt.xlabel('Epoch')
plt.ylabel('MCE')
plt.legend(
["Hidden Neuron = " + str(best_n), "Hidden Neuron = " + str(high_n), "Hidden Neuron = " + str(low_n)])
plt.grid()
plt.show()
# Part D
neuralNet_2L = MLP()
neuralNet_2L.addLayer(HiddenLayer(32 ** 2, 512, 0, 0.02))
neuralNet_2L.addLayer(HiddenLayer(512, 64, 0, 0.02))
neuralNet_2L.addLayer(HiddenLayer(64, 1, 0, 0.02))
mse_loss_2l, mce_loss_2l, test_mse_2l, test_mce_2l = neuralNet_2L.trainNetwork(train_img_flat / 255,
trainlabels, test_img_flat / 255,
testlabels, 0.35, 50, 200)
figureNum += 1
plt.figure(figureNum)
plt.title('MSE Over Training Data (2 Hidden Layer)')
plt.xlabel('Epoch')
plt.ylabel('MSE')
plt.plot(mse_loss_2l)
plt.show()
figureNum += 1
plt.figure(figureNum)
plt.title('MCE Over Training Data (2 Hidden Layer)')
plt.xlabel('Epoch')
plt.ylabel('MCE')
plt.plot(mce_loss_2l)
plt.show()
figureNum += 1
plt.figure(figureNum)
plt.title('MSE Over Test Data (2 Hidden Layer)')
plt.xlabel('Epoch')
plt.ylabel('MSE')
plt.plot(test_mse_2l)
plt.show()
figureNum += 1
plt.figure(figureNum)
plt.title('MCE Over Test Data (2 Hidden Layer)')
plt.xlabel('Epoch')
plt.ylabel('MCE')
plt.plot(test_mce_2l)
plt.show()
# Part E
neuralNet_2L = MLP(momentum=True, momentumCoef=0.2)
neuralNet_2L.addLayer(HiddenLayer(32 ** 2, 512, 0, 0.02))
neuralNet_2L.addLayer(HiddenLayer(512, 64, 0, 0.02))
neuralNet_2L.addLayer(HiddenLayer(64, 1, 0, 0.02))
mse_loss_2l_m, mce_loss_2l_m, test_mse_2l_m, test_mce_2l_m = neuralNet_2L.trainNetwork(train_img_flat / 255,
trainlabels,
test_img_flat / 255,
testlabels, 0.35, 50,
200)
figureNum += 1
plt.figure(figureNum)
plt.plot(mse_loss_2l)
plt.plot(mse_loss_2l_m)
plt.title('MSE Over Training Data (2 Hidden Layer)')
plt.xlabel('Epoch')
plt.ylabel('MSE')
plt.legend(["w/o Momentum", "with Momentum"])
plt.grid()
plt.show()
figureNum += 1
plt.figure(figureNum)
plt.plot(mce_loss_2l)
plt.plot(mce_loss_2l_m)
plt.title('MCE Over Training Data (2 Hidden Layer)')
plt.xlabel('Epoch')
plt.ylabel('MCE')
plt.legend(["w/o Momentum", "with Momentum"])
plt.grid()
plt.show()
figureNum += 1
plt.figure(figureNum)
plt.plot(test_mse_2l)
plt.plot(test_mse_2l_m)
plt.title('MSE Over Test Data (2 Hidden Layer)')
plt.xlabel('Epoch')
plt.ylabel('MSE')
plt.legend(["w/o Momentum", "with Momentum"])
plt.grid()
plt.show()
figureNum += 1
plt.figure(figureNum)
plt.plot(test_mce_2l)
plt.plot(test_mce_2l_m)
plt.title('MCE Over Test Data (2 Hidden Layer)')
plt.xlabel('Epoch')
plt.ylabel('MCE')
plt.legend(["w/o Momentum", "with Momentum"])
plt.grid()
plt.show()
elif question == '2' :
import numpy as np
import h5py
import matplotlib.pyplot as plt
# Part A
f = h5py.File('assign2_data2.h5', 'r')
dataKeys = list(f.keys())
print('The data keys are:' + str(dataKeys))
# Gathering the train images, test images, train labels and test labels.
testdata = np.asarray(f['testx'])
testlbls = np.asarray(f['testd'])
traindata = np.asarray(f['trainx'])
trainlbls = np.asarray(f['traind'])
validdata = np.asarray(f['valx'])
validlbls = np.asarray(f['vald'])
words = np.asarray(f['words'])
print('The size of testdata is: ' + str(np.shape(testdata)))
print('The size of testlbls is: ' + str(np.shape(testlbls)))
print('The size of traindata is: ' + str(np.shape(traindata)))
print('The size of trainlbls is: ' + str(np.shape(trainlbls)))
print('The size of validdata is: ' + str(np.shape(validdata)))
print('The size of validlbls is: ' + str(np.shape(validlbls)))
print('The size of words is: ' + str(np.shape(words)))
# One Hot Encode Transform
def one_hot_encoder(x):
samplesize = np.shape(x)[0]
x = x.reshape(samplesize, -1)
featuresize = np.shape(x)[1]
result = np.zeros((samplesize, featuresize, 250))
for i in range(samplesize):
for j in range(featuresize):
a = x[i, j]
result[i, j, a - 1] = 1
if (featuresize == 1):
result = result.reshape(-1, 250)
else:
result = result.reshape(samplesize, 250, featuresize)
return result
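# Shape check (sketch): word-index input of shape (N, 3) comes back as (N, 250, 3),
# while a 1-D label vector comes back as (N, 250). Note the final reshape reorders
# memory rather than swapping axes; np.transpose(result, (0, 2, 1)) would be the
# axis-swapping alternative.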
train_data = one_hot_encoder(traindata)
train_labels = one_hot_encoder(trainlbls)
test_data = one_hot_encoder(testdata)
test_labels = one_hot_encoder(testlbls)
val_data = one_hot_encoder(validdata)
val_labels = one_hot_encoder(validlbls)
print('The size of test_data is: ' + str(np.shape(test_data)))
print('The size of test_labels is: ' + str(np.shape(test_labels)))
print('The size of train_data is: ' + str(np.shape(train_data)))
print('The size of train_labels is: ' + str(np.shape(train_labels)))
print('The size of val_data is: ' + str(np.shape(val_data)))
print('The size of val_labels is: ' + str(np.shape(val_labels)))
class HiddenLayer:
def __init__(self, neuronNum, neuronSize, mean, std):
'''
This class creates a hidden layer for the neural network.
Weights and biases are initialized from a random Gaussian distribution.
INPUTS:
neuronNum : neuronNum is the number of inputs to each neuron in this layer (the previous layer's size).
neuronSize : neuronSize is the number of neurons in a hidden layer.
mean : mean for Gaussian distribution.
std : Standard deviation for Gaussian distribution.
RETURNS:
'''
np.random.seed(8)
self.weights = np.random.normal(loc=mean, scale=std, size=(neuronNum, neuronSize))
self.bias = np.random.normal(loc=mean, scale=std, size=(1, neuronSize))
self.Z = None
self.A = None
self.grad = None
self.dB = None
self.dW = None
self.error = None
self.momentum_dw = 0
self.momentum_db = 0
class MLP:
def __init__(self, momentum=False, momentumCoef=0):
'''
This class creates a multilayer perceptron network.
INPUTS:
momentum : boolean flag indicating whether momentum-based weight updates are used
momentumCoef : Coefficient of momentum learning
RETURNS:
'''
self.momentum = momentum
self.momentumCoef = momentumCoef
self.layers = list()
self.batchSize = 0
def addLayer(self, layer):
'''
This function adds a HiddenLayer class to the network.
INPUTS:
layer : layer is an instance of HiddenLayer class
RETURNS:
'''
self.layers.append(layer)
def sigmoid(self, x):
'''
This function is the sigmoid for the activation function.
INPUTS:
x : x is the weighted sum which will be pushed to activation function.
RETURNS:
result : result is the sigmoid of the input x.
'''
result = 1 / (1 + np.exp(-x))
return result
def der_sigmoid(self, x):
'''
This function is the derivative of sigmoid function.
INPUTS:
x : x is the input.
RETURNS:
result : result is the derivative of sigmoid of the input x.
'''
result = self.sigmoid(x) * (1 - self.sigmoid(x))
return result
def softmax(self, x):
'''
This function is the softmax for the activation function of output layer.
INPUTS:
x : x is the weighted sum which will be pushed to activation function.
RETURNS:
result : result is the softmax of the input x.
'''
e_x = np.exp(x - np.max(x, axis=-1, keepdims=True))
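# Assumed continuation (not part of the original snippet): the usual softmax
# normalisation would follow, e.g. return e_x / np.sum(e_x, axis=-1, keepdims=True)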
import os
from gym import error, spaces
from gym.utils import seeding
import numpy as np
from gym.envs.flex import flex_env
import pygame as pg
import itertools
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
from scipy.spatial.distance import cdist
from scipy.spatial.transform import Rotation as R
try:
import bindings as pyFlex
except ImportError as e:
raise error.DependencyNotInstalled(
"{}. (HINT: PyFlex Binding is not installed correctly)".format(e))
class PlasticFlippingEnv(flex_env.FlexEnv):
def __init__(self):
self.resolution = 32
self.direct_info_dim = 13
obs_size = self.resolution * self.resolution *1 + self.direct_info_dim
self.frame_skip = 10
self.mapHalfExtent = 4
self.mapPartitionSize = 3
self.idxPool = np.array([x for x in itertools.product(np.arange(self.mapPartitionSize) - int(
self.mapPartitionSize / 2), np.arange(self.mapPartitionSize) - int(self.mapPartitionSize / 2))])
self.numInitClusters = 1
self.randomCluster = True
self.clusterDim = np.array([5,2,5])
action_bound = np.array([[-10, -10, -10, -np.pi / 2], [
10, 10, 10, np.pi / 2]])
obs_high = np.ones(obs_size) * np.inf
obs_low = -obs_high
observation_bound = np.array([obs_low, obs_high])
flex_env.FlexEnv.__init__(self, self.frame_skip, obs_size, observation_bound, action_bound, scene=2, viewer=0)
self.metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': int(np.round(1.0 / self.dt))
}
self.action_scale = (action_bound[1] - action_bound[0]) / 2
self.barDim = np.array([1.5, 2.0, 0.8])
# self.goal_gradients = np.zeros((self.numInstances,self.resolution,self.resolution))
self.initClusterparam = np.zeros(
(self.numInstances, 6 * self.numInitClusters))
self.rolloutCnt = 0
self.stage = np.ones(self.numInstances)
self.rolloutRet = np.zeros(self.numInstances)
self.currCurriculum = 0
self.rwdBuffer = [[0, 0, 0] for _ in range(100)]
print("============================================Flipping================================================")
def angle_to_rot_matrix(self, angles):
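# Returns one 2x2 planar rotation matrix per environment instance:
# [[cos a, -sin a], [sin a, cos a]] for each angle a (in radians).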
rot_vec = np.ones((self.numInstances, 2, 2))
rot_vec[:, 0, 0] = np.cos(angles)
rot_vec[:, 0, 1] = -np.sin(angles)
rot_vec[:, 1, 0] = np.sin(angles)
rot_vec[:, 1, 1] = np.cos(angles)
return rot_vec
def _step(self, action):
action = action * self.action_scale
prev_bar_state, prev_part_state, prev_part_heights,prev_part_vel = self.get_state()
rot_mat = self.angle_to_rot_matrix(action[:, 3])
transformed_action = np.zeros((self.numInstances, 6))
for i in range(action.shape[0]):
bar_rot = R.from_euler('x',prev_bar_state[i,1,0])
action_trans = bar_rot.apply(action[i,0:3])
transformed_action[i, 0:3] = action_trans + prev_bar_state[i, 0]
flex_action = np.zeros((self.numInstances, 7))
flex_action[:, 0] = transformed_action[:, 0]
flex_action[:, 1] = transformed_action[:, 1]
flex_action[:, 2] = transformed_action[:, 2]
flex_action[:, 3] = prev_bar_state[:, 1, 0] + action[:, 3]
flex_action[:, 4] = 0
flex_action[:, 5] = 0
flex_action[:, 6] = 0
prev_height_diff = np.min(prev_part_heights,axis=1)-prev_bar_state[:,0,1]
prev_com_xz = np.mean(prev_part_state,axis=1)
# Simulation
done = self.do_simulation(flex_action, self.frame_skip)
curr_bar_state, curr_part_state, curr_part_heights,curr_part_vels = self.get_state()
curr_com_xz = np.mean(curr_part_state,axis=1)
obs = self._get_obs()
height_diff = np.min(curr_part_heights,axis=1)-curr_bar_state[:,0,1]
curr_total_heat = np.zeros(self.numInstances)
curr_total_heat_cnt = np.zeros(self.numInstances)
ang_vels = np.zeros(self.numInstances)
ang_vels_full = np.zeros((self.numInstances,3))
ang_vels_res = np.zeros(self.numInstances)
for i in range(self.numInstances):
height = height_diff[i]
curr_part_vel = curr_part_vels[i]
bar_rot = R.from_euler('x',curr_bar_state[i,1,0])
currParts = np.concatenate([curr_part_state[i,:,0,np.newaxis],curr_part_heights[i,:,np.newaxis],curr_part_state[i,:,1,np.newaxis]],axis=1)
rel_pos = currParts-curr_bar_state[i,0]
trans_pos = bar_rot.inv().apply(rel_pos)
ang_vel = self.get_angular_vel(currParts,curr_part_vel)
# w = np.mean(rel_pos,axis=0)[1] if np.mean(rel_pos,axis=0)[1]>0.5 else 0
w = 1 if np.mean(rel_pos,axis=0)[1]>0.5 else 0
# if(i==0):
# print(np.mean(rel_pos,axis=0)[1])
# print("Vel mag", np.mean(np.linalg.norm(curr_part_vel,axis=1)))
ang_vels_full[i] = 5*ang_vel*w
ang_vel_proj =np.dot(ang_vel,np.array([1,0,0]))*w
ang_vel_res = np.linalg.norm(ang_vel - ang_vel_proj*np.array([1,0,0]))
ang_vels[i] = np.clip(4*(ang_vel_proj),-1,1)
# ang_vels[i] = -4*(ang_vel_proj)
ang_vels_res[i] = (ang_vel_res)
# Heavy penalty on low particle heights
# Clipped ang vel val
# Only height reward
self.set_aux_info(ang_vels_full)
height_diff[height_diff>0] = 0.1+height_diff[height_diff>0]*10
height_diff[height_diff<0] *= 0.1
rewards = 0.1*0*height_diff+ang_vels
# print(ang_vels[0])
# if self.currCurriculum == 1:
# rewards -=-ang_vels_res
self.rolloutRet += rewards
info = {
# 'Total Reward': rewards[0],
'Height' : 0.1*height_diff[0],
'ang_vel': ang_vels[0],
# 'com_diff': com_diff[0]
}
reward_decomp = [0,0,0]
if (len(self.rwdBuffer) >= 100):
self.rwdBuffer.pop(0)
self.rwdBuffer.append(reward_decomp)
return obs, rewards, done, info
def _get_obs(self):
bar_states, part_states, part_heights,part_vels = self.get_state()
obs_list = []
for i in range(self.numInstances):
stage = self.stage[i]
part_state = part_states[i]
valid_idx = (part_state[:, 0] > -self.mapHalfExtent) & (part_state[:, 0] < self.mapHalfExtent) & (
part_state[:, 1] > -self.mapHalfExtent) & (part_state[:, 1] < self.mapHalfExtent)
part_state = part_state[valid_idx]
part_height = part_heights[i]
part_height = part_height[valid_idx]
part_vel = part_vels[i]
part_vel = part_vel[valid_idx]
bar_state = bar_states[i]
bar_y_rot_vec = np.array([np.cos(bar_state[1, 1]), np.sin(bar_state[1, 1])])
# bar_rot = np.zeros((2, 2))
# bar_rot[0, 0] = bar_y_rot_vec[0]
# bar_rot[0, 1] = -bar_y_rot_vec[1]
# bar_rot[1, 0] = bar_y_rot_vec[1]
# bar_rot[1, 1] = bar_y_rot_vec[0]
# density = self.get_particle_density(
# part_state, bar_state, bar_rot, normalized=True)
# height_map = self.get_mean_height_map(part_state, bar_state, bar_rot, part_height)
part_pos_xyz = np.concatenate([part_state[:,0,np.newaxis],part_height[:,np.newaxis],part_state[:,1,np.newaxis]],axis=1)
height_map = self.get_mean_height_map(part_pos_xyz, bar_state)
# if(i==0):
# print(np.max(heightz))
ang_vel = self.get_angular_vel(part_pos_xyz,part_vel) #3
bar_pos = bar_state[0] # 3
bar_ang_x = np.array([np.cos(bar_state[1, 0]), np.sin(bar_state[1, 0])]) # 2
bar_vel = bar_state[2] # 3
bar_ang_vel_x = np.array([np.cos(bar_state[3, 0]), np.sin(bar_state[3, 0])]) # 2
# if(i==0):
# print(part_pos_xyz)
# print(part_vel)
# print(ang_vel)
bar_info = np.concatenate([bar_pos, bar_ang_x, bar_vel, bar_ang_vel_x,bar_vel])
obs = np.concatenate(
[bar_info, height_map.flatten()
])
obs_list.append(obs)
return np.array(obs_list)
def get_particle_density(self, particles, bar_state, rot, normalized=True, width=2.5):
if (particles.shape[0] == 0):
return np.zeros((self.resolution, self.resolution))
"""
Helper functions which obtain forces and energies
corresponding to atoms in structures. These functions automatically
cast atoms into their respective atomic environments.
"""
import numpy as np
from flare.gp import GaussianProcess
from flare.struc import Structure
from copy import deepcopy
from flare.predict import predict_on_structure_par, \
predict_on_atom, predict_on_atom_en, \
predict_on_structure_par_en
import pytest
def fake_predict(_, __):
return np.random.uniform(-1, 1), np.random.uniform(-1, 1)
def fake_predict_local_energy(_):
return np.random.uniform(-1, 1)
_fake_gp = GaussianProcess(kernel_name='2_sc', cutoffs=[5], hyps=[1, 1, 1])
_fake_structure = Structure(cell=np.eye(3), species=[1, 1, 1],
positions=np.random.uniform(0, 1, size=(3, 3)))
_fake_gp.predict = fake_predict
_fake_gp.predict_local_energy = fake_predict_local_energy
assert isinstance(_fake_gp.predict(1, 1), tuple)
assert isinstance(_fake_gp.predict_local_energy(1), float)
@pytest.mark.parametrize('n_cpu', [None, 1, 2])
def test_predict_on_structure_par(n_cpu):
# Predict only on the first atom, and make rest NAN
selective_atoms = [0]
skipped_atom_value = np.nan
forces, stds = predict_on_structure_par(_fake_structure,
_fake_gp,
n_cpus=n_cpu,
write_to_structure=False,
selective_atoms=selective_atoms,
skipped_atom_value=skipped_atom_value)
for x in forces[0][:]:
assert isinstance(x, float)
for x in forces[1:]:
assert np.isnan(x).all()
# Predict only on the second and third, and make rest 0
selective_atoms = [1, 2]
skipped_atom_value = 0
forces, stds = predict_on_structure_par(_fake_structure,
_fake_gp,
write_to_structure=False,
n_cpus=n_cpu,
selective_atoms=selective_atoms,
skipped_atom_value=skipped_atom_value)
for x in forces[1]:
assert isinstance(x, float)
for x in forces[2]:
assert isinstance(x, float)
assert np.equal(forces[0], 0).all()
# Make selective atoms be all and ensure results are normal
selective_atoms = [0, 1, 2]
forces, stds = predict_on_structure_par(_fake_structure,
_fake_gp,
write_to_structure=True,
n_cpus=n_cpu,
selective_atoms=selective_atoms,
skipped_atom_value=skipped_atom_value)
for x in forces.flatten():
assert isinstance(x, float)
for x in stds.flatten():
assert isinstance(x, float)
assert np.array_equal(_fake_structure.forces, forces)
assert np.array_equal(_fake_structure.stds, stds)
# Make selective atoms be nothing and ensure results are normal
forces, stds = predict_on_structure_par(_fake_structure,
_fake_gp,
write_to_structure=True,
n_cpus=n_cpu,
selective_atoms=None,
skipped_atom_value=skipped_atom_value)
for x in forces.flatten():
assert isinstance(x, float)
for x in stds.flatten():
assert isinstance(x, float)
assert np.array_equal(_fake_structure.forces, forces)
assert np.array_equal(_fake_structure.stds, stds)
# Get new examples to also test the results not being written
selective_atoms = [0, 1]
forces, stds = predict_on_structure_par(_fake_structure,
_fake_gp,
write_to_structure=True,
n_cpus=n_cpu,
selective_atoms=selective_atoms,
skipped_atom_value=skipped_atom_value)
for x in forces.flatten():
assert isinstance(x, float)
for x in stds.flatten():
assert isinstance(x, float)
assert np.array_equal(_fake_structure.forces[:2][:], forces[:2][:])
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import logging
import pickle
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from Capsule_net import Capsule
batch_size = 100
nb_epoch = 10
hidden_dim = 120
test = pd.read_csv("./corpus/imdb/testData.tsv", header=0,
delimiter="\t", quoting=3)
def get_idx_from_sent(sent, word_idx_map):
"""
Transforms a sentence into a list of word indices; out-of-vocabulary words map to index 1. Zero-padding is applied later by pad_sequences.
"""
x = []
words = sent.split()
for word in words:
if word in word_idx_map:
x.append(word_idx_map[word])
else:
x.append(1)
return x
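# Illustrative example (the mapping below is made up, not from the corpus):
# get_idx_from_sent('a great movie', {'a': 2, 'movie': 3}) returns [2, 1, 3],
# since 'great' is out of vocabulary and falls back to the reserved index 1.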
def make_idx_data(revs, word_idx_map, maxlen=60):
"""
Transforms sentences into a 2-d matrix.
"""
X_train, X_test, X_dev, y_train, y_dev = [], [], [], [], []
for rev in revs:
sent = get_idx_from_sent(rev['text'], word_idx_map)
y = rev['y']
if rev['split'] == 1:
X_train.append(sent)
y_train.append(y)
elif rev['split'] == 0:
X_dev.append(sent)
y_dev.append(y)
elif rev['split'] == -1:
X_test.append(sent)
X_train = keras.preprocessing.sequence.pad_sequences(np.array(X_train), maxlen=maxlen)
X_dev = keras.preprocessing.sequence.pad_sequences(np.array(X_dev), maxlen=maxlen)
X_test = keras.preprocessing.sequence.pad_sequences(np.array(X_test), maxlen=maxlen)
# X_valid = sequence.pad_sequences(np.array(X_valid), maxlen=maxlen)
y_train = keras.utils.to_categorical(np.array(y_train))
y_dev = keras.utils.to_categorical(np.array(y_dev))
from rdkit import Chem
from rdkit.Chem import QED
import numpy as np
import networkx as nx
from openchem.utils.sa_score import sascorer
from rdkit.Chem import Descriptors
def reward_penalized_log_p(smiles, return_mean=True):
"""
Reward that consists of log p penalized by SA and # long cycles,
as described in (Kusner et al. 2017). Scores are normalized based on the
statistics of 250k_rndm_zinc_drugs_clean.smi dataset
:param smiles: list of SMILES strings
:return: float
"""
# normalization constants, statistics from 250k_rndm_zinc_drugs_clean.smi
logP_mean = 2.4570953396190123
logP_std = 1.434324401111988
SA_mean = -3.0525811293166134
SA_std = 0.8335207024513095
cycle_mean = -0.0485696876403053
cycle_std = 0.2860212110245455
mols = [Chem.MolFromSmiles(sm) for sm in smiles]
log_p = np.array([Descriptors.MolLogP(mol) for mol in mols])
SA = -np.array(sa_score(smiles, return_mean=False))
# cycle score
cycle_score = []
for mol in mols:
cycle_list = nx.cycle_basis(nx.Graph(Chem.rdmolops.GetAdjacencyMatrix(mol)))
if len(cycle_list) == 0:
cycle_length = 0
else:
cycle_length = max([len(j) for j in cycle_list])
if cycle_length <= 6:
cycle_length = 0
else:
cycle_length = cycle_length - 6
cycle_score.append(-cycle_length)
cycle_score = np.array(cycle_score)
normalized_log_p = (log_p - logP_mean) / logP_std
normalized_SA = (SA - SA_mean) / SA_std
normalized_cycle = (cycle_score - cycle_mean) / cycle_std
score = list(normalized_log_p + normalized_SA + normalized_cycle)
if return_mean:
return np.mean(score)
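# Assumed continuation (not part of the original snippet): when return_mean is False,
# the per-molecule scores would be returned instead, e.g. return score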
import copy
import inspect
import logging
import math
import os
import pprint
import time
from typing import Union
import networkx as nx
import numpy as np
import pandas as pd
from autogluon.common.utils.log_utils import set_logger_verbosity
from autogluon.common.utils.pandas_utils import get_approximate_df_mem_usage
from autogluon.common.utils.utils import setup_outputdir
from autogluon.core.calibrate.temperature_scaling import tune_temperature_scaling
from autogluon.core.calibrate.conformity_score import compute_conformity_score
from autogluon.core.constants import BINARY, MULTICLASS, REGRESSION, QUANTILE, AUTO_WEIGHT, BALANCE_WEIGHT, PSEUDO_MODEL_SUFFIX, PROBLEM_TYPES_CLASSIFICATION
from autogluon.core.data.label_cleaner import LabelCleanerMulticlassToBinary
from autogluon.core.dataset import TabularDataset
from autogluon.core.pseudolabeling.pseudolabeling import filter_pseudo, filter_ensemble_pseudo
from autogluon.core.scheduler.scheduler_factory import scheduler_factory
from autogluon.core.trainer import AbstractTrainer
from autogluon.core.utils import get_pred_from_proba_df
from autogluon.core.utils import plot_performance_vs_trials, plot_summary_of_models, plot_tabular_models
from autogluon.core.utils.decorators import apply_presets
from autogluon.core.utils.loaders import load_pkl, load_str
from autogluon.core.utils.savers import save_pkl, save_str
from autogluon.core.utils.utils import default_holdout_frac
from ..configs.feature_generator_presets import get_default_feature_generator
from ..configs.hyperparameter_configs import get_hyperparameter_config
from ..configs.presets_configs import tabular_presets_dict
from ..learner import AbstractLearner, DefaultLearner
logger = logging.getLogger(__name__) # return autogluon root logger
# TODO: num_bag_sets -> ag_args
# Extra TODOs (Stretch): Can occur post v0.1
# TODO: make core_kwargs a kwargs argument to predictor.fit
# TODO: add aux_kwargs to predictor.fit
# TODO: add pip freeze + python version output after fit + log file, validate that same pip freeze on load as cached
# TODO: predictor.clone()
# TODO: Add logging comments that models are serialized on disk after fit
# TODO: consider adding kwarg option for data which has already been preprocessed by feature generator to skip feature generation.
# TODO: Resolve raw text feature usage in default feature generator
# Done for Tabular
# TODO: Remove all `time_limits` in project, replace with `time_limit`
class TabularPredictor:
"""
AutoGluon TabularPredictor predicts values in a column of a tabular dataset (classification or regression).
Parameters
----------
label : str
Name of the column that contains the target variable to predict.
problem_type : str, default = None
Type of prediction problem, i.e. is this a binary/multiclass classification or regression problem (options: 'binary', 'multiclass', 'regression', 'quantile').
If `problem_type = None`, the prediction problem type is inferred based on the label-values in provided dataset.
eval_metric : function or str, default = None
Metric by which predictions will be ultimately evaluated on test data.
AutoGluon tunes factors such as hyperparameters, early-stopping, ensemble-weights, etc. in order to improve this metric on validation data.
If `eval_metric = None`, it is automatically chosen based on `problem_type`.
Defaults to 'accuracy' for binary and multiclass classification, 'root_mean_squared_error' for regression, and 'pinball_loss' for quantile.
Otherwise, options for classification:
['accuracy', 'balanced_accuracy', 'f1', 'f1_macro', 'f1_micro', 'f1_weighted',
'roc_auc', 'roc_auc_ovo_macro', 'average_precision', 'precision', 'precision_macro', 'precision_micro',
'precision_weighted', 'recall', 'recall_macro', 'recall_micro', 'recall_weighted', 'log_loss', 'pac_score']
Options for regression:
['root_mean_squared_error', 'mean_squared_error', 'mean_absolute_error', 'median_absolute_error', 'r2']
For more information on these options, see `sklearn.metrics`: https://scikit-learn.org/stable/modules/classes.html#sklearn-metrics-metrics
You can also pass your own evaluation function here as long as it follows formatting of the functions defined in folder `autogluon.core.metrics`.
path : str, default = None
Path to directory where models and intermediate outputs should be saved.
If unspecified, a time-stamped folder called "AutogluonModels/ag-[TIMESTAMP]" will be created in the working directory to store all models.
Note: To call `fit()` twice and save all results of each fit, you must specify different `path` locations or don't specify `path` at all.
Otherwise files from first `fit()` will be overwritten by second `fit()`.
verbosity : int, default = 2
Verbosity levels range from 0 to 4 and control how much information is printed.
Higher levels correspond to more detailed print statements (you can set verbosity = 0 to suppress warnings).
If using logging, you can alternatively control amount of information printed via `logger.setLevel(L)`,
where `L` ranges from 0 to 50 (Note: higher values of `L` correspond to fewer print statements, opposite of verbosity levels).
sample_weight : str, default = None
If specified, this column-name indicates which column of the data should be treated as sample weights. This column will NOT be considered as a predictive feature.
Sample weights should be non-negative (and cannot be nan), with larger values indicating which rows are more important than others.
If you want your usage of sample weights to match results obtained outside of this Predictor, then ensure sample weights for your training (or tuning) data sum to the number of rows in the training (or tuning) data.
You may also specify two special strings: 'auto_weight' (automatically choose a weighting strategy based on the data) or 'balance_weight' (equally weight classes in classification, no effect in regression). If specifying your own sample_weight column, make sure its name does not match these special strings.
weight_evaluation : bool, default = False
Only considered when `sample_weight` column is not None. Determines whether sample weights should be taken into account when computing evaluation metrics on validation/test data.
If True, then weighted metrics will be reported based on the sample weights provided in the specified `sample_weight` (in which case `sample_weight` column must also be present in test data).
In this case, the 'best' model used by default for prediction will also be decided based on a weighted version of evaluation metric.
Note: we do not recommend specifying `weight_evaluation` when `sample_weight` is 'auto_weight' or 'balance_weight', instead specify appropriate `eval_metric`.
groups : str, default = None
[Experimental] If specified, AutoGluon will use the column named the value of groups in `train_data` during `.fit` as the data splitting indices for the purposes of bagging.
This column will not be used as a feature during model training.
This parameter is ignored if bagging is not enabled. To instead specify a custom validation set with bagging disabled, specify `tuning_data` in `.fit`.
The data will be split via `sklearn.model_selection.LeaveOneGroupOut`.
Use this option to control the exact split indices AutoGluon uses.
It is not recommended to use this option unless it is required for very specific situations.
Bugs may arise from edge cases if the provided groups are not valid to properly train models, such as if not all classes are present during training in multiclass classification. It is up to the user to sanitize their groups.
As an example, if you want your data folds to preserve adjacent rows in the table without shuffling, then for 3 fold bagging with 6 rows of data, the groups column values should be [0, 0, 1, 1, 2, 2].
**kwargs :
learner_type : AbstractLearner, default = DefaultLearner
A class which inherits from `AbstractLearner`. This dictates the inner logic of predictor.
If you don't know what this is, keep it as the default.
learner_kwargs : dict, default = None
Kwargs to send to the learner. Options include:
positive_class : str or int, default = None
Used to determine the positive class in binary classification.
This is used for certain metrics such as 'f1' which produce different scores depending on which class is considered the positive class.
If not set, will be inferred as the second element of the existing unique classes after sorting them.
If classes are [0, 1], then 1 will be selected as the positive class.
If classes are ['def', 'abc'], then 'def' will be selected as the positive class.
If classes are [True, False], then True will be selected as the positive class.
ignored_columns : list, default = None
Banned subset of column names that predictor may not use as predictive features (e.g. unique identifier to a row or user-ID).
These columns are ignored during `fit()`.
label_count_threshold : int, default = 10
For multi-class classification problems, this is the minimum number of times a label must appear in dataset in order to be considered an output class.
AutoGluon will ignore any classes whose labels do not appear at least this many times in the dataset (i.e. will never predict them).
cache_data : bool, default = True
When enabled, the training and validation data are saved to disk for future reuse.
Enables advanced functionality in predictor such as `fit_extra()` and feature importance calculation on the original data.
trainer_type : AbstractTrainer, default = AutoTrainer
A class inheriting from `AbstractTrainer` that controls training/ensembling of many models.
If you don't know what this is, keep it as the default.
Attributes
----------
path : str
Path to directory where all models used by this Predictor are stored.
problem_type : str
What type of prediction problem this Predictor has been trained for.
eval_metric : function or str
What metric is used to evaluate predictive performance.
label : str
Name of table column that contains data from the variable to predict (often referred to as: labels, response variable, target variable, dependent variable, Y, etc).
feature_metadata : :class:`autogluon.common.features.feature_metadata.FeatureMetadata`
Inferred data type of each predictive variable after preprocessing transformation (i.e. column of training data table used to predict `label`).
Contains both raw dtype and special dtype information. Each feature has exactly 1 raw dtype (such as 'int', 'float', 'category') and zero to many special dtypes (such as 'datetime_as_int', 'text', 'text_ngram').
Special dtypes are AutoGluon specific feature types that are used to identify features with meaning beyond what the raw dtype can convey.
`feature_metadata.type_map_raw`: Dictionary of feature name -> raw dtype mappings.
`feature_metadata.type_group_map_special`: Dictionary of lists of special feature names, grouped by special feature dtype.
positive_class : str or int
Returns the positive class name in binary classification. Useful for computing metrics such as F1 which require a positive and negative class.
In binary classification, :meth:`TabularPredictor.predict_proba` returns the estimated probability that each row belongs to the positive class.
Will print a warning and return None if called when `predictor.problem_type != 'binary'`.
class_labels : list
For multiclass problems, this list contains the class labels in sorted order of `predict_proba()` output.
For binary problems, this list contains the class labels in sorted order of `predict_proba(as_multiclass=True)` output.
`class_labels[0]` corresponds to internal label = 0 (negative class), `class_labels[1]` corresponds to internal label = 1 (positive class).
This is relevant for certain metrics such as F1 where True and False labels impact the metric score differently.
For other problem types, will equal None.
For example if `pred = predict_proba(x, as_multiclass=True)`, then ith index of `pred` provides predicted probability that `x` belongs to class given by `class_labels[i]`.
class_labels_internal : list
For multiclass problems, this list contains the internal class labels in sorted order of internal `predict_proba()` output.
For binary problems, this list contains the internal class labels in sorted order of internal `predict_proba(as_multiclass=True)` output.
The value will always be `class_labels_internal=[0, 1]` for binary problems, with 0 as the negative class, and 1 as the positive class.
For other problem types, will equal None.
class_labels_internal_map : dict
For binary and multiclass classification problems, this dictionary contains the mapping of the original labels to the internal labels.
For example, in binary classification, label values of 'True' and 'False' will be mapped to the internal representation `1` and `0`.
Therefore, class_labels_internal_map would equal {'True': 1, 'False': 0}
For other problem types, will equal None.
For multiclass, it is possible for not all of the label values to have a mapping.
This indicates that the internal models will never predict those missing labels, and training rows associated with the missing labels were dropped.
"""
Dataset = TabularDataset
predictor_file_name = 'predictor.pkl'
_predictor_version_file_name = '__version__'
def __init__(
self,
label,
problem_type=None,
eval_metric=None,
path=None,
verbosity=2,
sample_weight=None,
weight_evaluation=False,
groups=None,
**kwargs
):
self.verbosity = verbosity
set_logger_verbosity(self.verbosity)
if sample_weight == AUTO_WEIGHT: # TODO: update auto_weight strategy and make it the default
sample_weight = None
logger.log(15, f"{AUTO_WEIGHT} currently does not use any sample weights.")
self.sample_weight = sample_weight
self.weight_evaluation = weight_evaluation # TODO: sample_weight and weight_evaluation can both be properties that link to self._learner.sample_weight, self._learner.weight_evaluation
if self.sample_weight in [AUTO_WEIGHT, BALANCE_WEIGHT] and self.weight_evaluation:
logger.warning(
f"We do not recommend specifying weight_evaluation when sample_weight='{self.sample_weight}', instead specify appropriate eval_metric.")
self._validate_init_kwargs(kwargs)
path = setup_outputdir(path)
learner_type = kwargs.pop('learner_type', DefaultLearner)
learner_kwargs = kwargs.pop('learner_kwargs', dict())
quantile_levels = kwargs.get('quantile_levels', None)
self._learner: AbstractLearner = learner_type(path_context=path, label=label, feature_generator=None,
eval_metric=eval_metric, problem_type=problem_type,
quantile_levels=quantile_levels,
sample_weight=self.sample_weight,
weight_evaluation=self.weight_evaluation, groups=groups,
**learner_kwargs)
self._learner_type = type(self._learner)
self._trainer = None
@property
def class_labels(self):
return self._learner.class_labels
@property
def class_labels_internal(self):
return self._learner.label_cleaner.ordered_class_labels_transformed
@property
def class_labels_internal_map(self):
return self._learner.label_cleaner.inv_map
@property
def quantile_levels(self):
return self._learner.quantile_levels
@property
def eval_metric(self):
return self._learner.eval_metric
@property
def problem_type(self):
return self._learner.problem_type
def features(self, feature_stage: str = 'original'):
"""
Returns a list of feature names dependent on the value of feature_stage.
Parameters
----------
feature_stage : str, default = 'original'
If 'original', returns the list of features specified in the original training data. This feature set is required in input data when making predictions.
If 'transformed', returns the list of features after pre-processing by the feature generator.
Returns
-------
Returns a list of feature names
"""
if feature_stage == 'original':
return self.feature_metadata_in.get_features()
elif feature_stage == 'transformed':
return self.feature_metadata.get_features()
else:
raise ValueError(f"Unknown feature_stage: '{feature_stage}'. Must be one of {['original', 'transformed']}")
@property
def feature_metadata(self):
return self._trainer.feature_metadata
@property
def feature_metadata_in(self):
return self._learner.feature_generator.feature_metadata_in
@property
def label(self):
return self._learner.label
@property
def path(self):
return self._learner.path
@apply_presets(tabular_presets_dict)
def fit(self,
train_data,
tuning_data=None,
time_limit=None,
presets=None,
hyperparameters=None,
feature_metadata='infer',
**kwargs):
"""
Fit models to predict a column of a data table (label) based on the other columns (features).
Parameters
----------
train_data : str or :class:`TabularDataset` or :class:`pd.DataFrame`
Table of the training data, which is similar to a pandas DataFrame.
If str is passed, `train_data` will be loaded using the str value as the file path.
tuning_data : str or :class:`TabularDataset` or :class:`pd.DataFrame`, default = None
Another dataset containing validation data reserved for tuning processes such as early stopping and hyperparameter tuning.
This dataset should be in the same format as `train_data`.
If str is passed, `tuning_data` will be loaded using the str value as the file path.
Note: final model returned may be fit on `tuning_data` as well as `train_data`. Do not provide your evaluation test data here!
In particular, when `num_bag_folds` > 0 or `num_stack_levels` > 0, models will be trained on both `tuning_data` and `train_data`.
If `tuning_data = None`, `fit()` will automatically hold out some random validation examples from `train_data`.
time_limit : int, default = None
Approximately how long `fit()` should run for (wallclock time in seconds).
If not specified, `fit()` will run until all models have completed training, but will not repeatedly bag models unless `num_bag_sets` is specified.
presets : list or str or dict, default = ['medium_quality_faster_train']
List of preset configurations for various arguments in `fit()`. Can significantly impact predictive accuracy, memory-footprint, and inference latency of trained models, and various other properties of the returned `predictor`.
It is recommended to specify presets and avoid specifying most other `fit()` arguments or model hyperparameters prior to becoming familiar with AutoGluon.
As an example, to get the most accurate overall predictor (regardless of its efficiency), set `presets='best_quality'`.
To get good quality with minimal disk usage, set `presets=['good_quality_faster_inference_only_refit', 'optimize_for_deployment']`
Any user-specified arguments in `fit()` will override the values used by presets.
If specifying a list of presets, later presets will override earlier presets if they alter the same argument.
For precise definitions of the provided presets, see file: `autogluon/tabular/configs/presets_configs.py`.
Users can specify custom presets by passing in a dictionary of argument values as an element to the list.
Available Presets: ['best_quality', 'high_quality_fast_inference_only_refit', 'good_quality_faster_inference_only_refit', 'medium_quality_faster_train', 'optimize_for_deployment', 'ignore_text']
It is recommended to only use one `quality` based preset in a given call to `fit()` as they alter many of the same arguments and are not compatible with each-other.
In-depth Preset Info:
best_quality={'auto_stack': True}
Best predictive accuracy with little consideration to inference time or disk usage. Achieve even better results by specifying a large time_limit value.
Recommended for applications that benefit from the best possible model accuracy.
high_quality_fast_inference_only_refit={'auto_stack': True, 'refit_full': True, 'set_best_to_refit_full': True, '_save_bag_folds': False}
High predictive accuracy with fast inference. ~10x-200x faster inference and ~10x-200x lower disk usage than `best_quality`.
Recommended for applications that require reasonable inference speed and/or model size.
good_quality_faster_inference_only_refit={'auto_stack': True, 'refit_full': True, 'set_best_to_refit_full': True, '_save_bag_folds': False, 'hyperparameters': 'light'}
Good predictive accuracy with very fast inference. ~4x faster inference and ~4x lower disk usage than `high_quality_fast_inference_only_refit`.
Recommended for applications that require fast inference speed.
medium_quality_faster_train={'auto_stack': False}
Medium predictive accuracy with very fast inference and very fast training time. ~20x faster training than `good_quality_faster_inference_only_refit`.
This is the default preset in AutoGluon, but should generally only be used for quick prototyping, as `good_quality_faster_inference_only_refit` results in significantly better predictive accuracy and faster inference time.
optimize_for_deployment={'keep_only_best': True, 'save_space': True}
Optimizes result immediately for deployment by deleting unused models and removing training artifacts.
Often can reduce disk usage by ~2-4x with no negatives to model accuracy or inference speed.
This will disable numerous advanced functionality, but has no impact on inference.
This will make certain functionality less informative, such as `predictor.leaderboard()` and `predictor.fit_summary()`.
Because unused models will be deleted under this preset, methods like `predictor.leaderboard()` and `predictor.fit_summary()` will no longer show the full set of models that were trained during `fit()`.
Recommended for applications where the inner details of AutoGluon's training is not important and there is no intention of manually choosing between the final models.
This preset pairs well with the other presets such as `good_quality_faster_inference_only_refit` to make a very compact final model.
Identical to calling `predictor.delete_models(models_to_keep='best', dry_run=False)` and `predictor.save_space()` directly after `fit()`.
ignore_text={'_feature_generator_kwargs': {'enable_text_ngram_features': False, 'enable_text_special_features': False, 'enable_raw_text_features': False}}
Disables automated feature generation when text features are detected.
This is useful to determine how beneficial text features are to the end result, as well as to ensure features are not mistaken for text when they are not.
Ignored if `feature_generator` was also specified.
hyperparameters : str or dict, default = 'default'
Determines the hyperparameters used by the models.
If `str` is passed, will use a preset hyperparameter configuration.
Valid `str` options: ['default', 'light', 'very_light', 'toy', 'multimodal']
'default': Default AutoGluon hyperparameters intended to maximize accuracy without significant regard to inference time or disk usage.
'light': Results in smaller models. Generally will make inference speed much faster and disk usage much lower, but with worse accuracy.
'very_light': Results in much smaller models. Behaves similarly to 'light', but in many cases with over 10x less disk usage and a further reduction in accuracy.
'toy': Results in extremely small models. Only use this when prototyping, as the model quality will be severely reduced.
'multimodal': [EXPERIMENTAL] Trains a multimodal transformer model alongside tabular models. Requires that some text columns appear in the data, a GPU, and CUDA-enabled MXNet.
When combined with 'best_quality' `presets` option, this can achieve extremely strong results in multimodal data tables that contain columns with text in addition to numeric/categorical columns.
Reference `autogluon/tabular/configs/hyperparameter_configs.py` for information on the hyperparameters associated with each preset.
Keys are strings that indicate which model types to train.
Stable model options include:
'GBM' (LightGBM)
'CAT' (CatBoost)
'XGB' (XGBoost)
'RF' (random forest)
'XT' (extremely randomized trees)
'KNN' (k-nearest neighbors)
'LR' (linear regression)
'NN' (neural network with MXNet backend)
'FASTAI' (neural network with FastAI backend)
Experimental model options include:
'FASTTEXT' (FastText)
'AG_TEXT_NN' (Multimodal Text+Tabular model, GPU is required)
'TRANSF' (Tabular Transformer, GPU is recommended)
If a certain key is missing from hyperparameters, then `fit()` will not train any models of that type. Omitting a model key from hyperparameters is equivalent to including this model key in `excluded_model_types`.
For example, set `hyperparameters = { 'NN':{...} }` if, say, you only want to train neural networks and no other types of models.
Values = dict of hyperparameter settings for each model type, or list of dicts.
Each hyperparameter can either be a single fixed value or a search space containing many possible values.
Unspecified hyperparameters will be set to default values (or default search spaces if `hyperparameter_tune = True`).
Caution: Any provided search spaces will be overridden by fixed defaults if `hyperparameter_tune = False`.
To train multiple models of a given type, set the value to a list of hyperparameter dictionaries.
For example, `hyperparameters = {'RF': [{'criterion': 'gini'}, {'criterion': 'entropy'}]}` will result in 2 random forest models being trained with separate hyperparameters.
Advanced functionality: Custom models
`hyperparameters` can also take special string values instead of a dictionary of model parameters which maps to a pre-configured model configuration (currently supported options = ['GBMLarge']).
These additional models will be trained using custom pre-specified hyperparameter settings that are known to work well.
Advanced functionality: Custom stack levels
By default, AutoGluon re-uses the same models and model hyperparameters at each level during stack ensembling.
To customize this behaviour, create a hyperparameters dictionary separately for each stack level, and then add them as values to a new dictionary, with keys equal to the stack level.
Example: `hyperparameters = {1: {'RF': rf_params1}, 2: {'CAT': [cat_params1, cat_params2], 'NN': {}}}`
This will result in a stack ensemble that has one custom random forest in level 1 followed by two CatBoost models with custom hyperparameters and a default neural network in level 2, for a total of 4 models.
If a level is not specified in `hyperparameters`, it will default to using the highest specified level to train models. This can also be explicitly controlled by adding a 'default' key.
Default:
hyperparameters = {
'NN': {},
'GBM': [
{'extra_trees': True, 'ag_args': {'name_suffix': 'XT'}},
{},
'GBMLarge',
],
'CAT': {},
'XGB': {},
'FASTAI': {},
'RF': [
{'criterion': 'gini', 'ag_args': {'name_suffix': 'Gini', 'problem_types': ['binary', 'multiclass']}},
{'criterion': 'entropy', 'ag_args': {'name_suffix': 'Entr', 'problem_types': ['binary', 'multiclass']}},
{'criterion': 'mse', 'ag_args': {'name_suffix': 'MSE', 'problem_types': ['regression']}},
],
'XT': [
{'criterion': 'gini', 'ag_args': {'name_suffix': 'Gini', 'problem_types': ['binary', 'multiclass']}},
{'criterion': 'entropy', 'ag_args': {'name_suffix': 'Entr', 'problem_types': ['binary', 'multiclass']}},
{'criterion': 'mse', 'ag_args': {'name_suffix': 'MSE', 'problem_types': ['regression']}},
],
'KNN': [
{'weights': 'uniform', 'ag_args': {'name_suffix': 'Unif'}},
{'weights': 'distance', 'ag_args': {'name_suffix': 'Dist'}},
],
}
Details regarding the hyperparameters you can specify for each model are provided in the following files:
NN: `autogluon.tabular.models.tabular_nn.hyperparameters.parameters`
Note: certain hyperparameter settings may cause these neural networks to train much slower.
GBM: `autogluon.tabular.models.lgb.hyperparameters.parameters`
See also the lightGBM docs: https://lightgbm.readthedocs.io/en/latest/Parameters.html
CAT: `autogluon.tabular.models.catboost.hyperparameters.parameters`
See also the CatBoost docs: https://catboost.ai/docs/concepts/parameter-tuning.html
XGB: `autogluon.tabular.models.xgboost.hyperparameters.parameters`
See also the XGBoost docs: https://xgboost.readthedocs.io/en/latest/parameter.html
FASTAI: `autogluon.tabular.models.fastainn.hyperparameters.parameters`
See also the FastAI docs: https://docs.fast.ai/tabular.models.html
RF: See sklearn documentation: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
Note: Hyperparameter tuning is disabled for this model.
XT: See sklearn documentation: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
Note: Hyperparameter tuning is disabled for this model.
KNN: See sklearn documentation: https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html
Note: Hyperparameter tuning is disabled for this model.
LR: `autogluon.tabular.models.lr.hyperparameters.parameters`
Note: Hyperparameter tuning is disabled for this model.
Note: 'penalty' parameter can be used for regression to specify regularization method: 'L1' and 'L2' values are supported.
Advanced functionality: Custom AutoGluon model arguments
These arguments are optional and can be specified in any model's hyperparameters.
Example: `hyperparameters = {'RF': {..., 'ag_args': {'name_suffix': 'CustomModelSuffix', 'disable_in_hpo': True}}`
ag_args: Dictionary of customization options related to meta properties of the model such as its name, the order it is trained, the problem types it is valid for, and the type of HPO it utilizes.
Valid keys:
name: (str) The name of the model. This overrides AutoGluon's naming logic and all other name arguments if present.
name_main: (str) The main name of the model. Example: 'RandomForest'.
name_prefix: (str) Add a custom prefix to the model name. Unused by default.
name_suffix: (str) Add a custom suffix to the model name. Unused by default.
priority: (int) Determines the order in which the model is trained. Larger values result in the model being trained earlier. Default values range from 100 (KNN) to 0 (custom), dictated by model type. If you want this model to be trained first, set priority = 999.
problem_types: (list) List of valid problem types for the model. `problem_types=['binary']` will result in the model only being trained if `problem_type` is 'binary'.
disable_in_hpo: (bool) If True, the model will only be trained if `hyperparameter_tune_kwargs=None`.
valid_stacker: (bool) If False, the model will not be trained as a level 2 or higher stacker model.
valid_base: (bool) If False, the model will not be trained as a level 1 (base) model.
hyperparameter_tune_kwargs: (dict) Refer to :meth:`TabularPredictor.fit` hyperparameter_tune_kwargs argument. If specified here, will override global HPO settings for this model.
Reference the default hyperparameters for example usage of these options.
ag_args_fit: Dictionary of model fit customization options related to how and with what constraints the model is trained. These parameters affect stacker fold models, but not stacker models themselves.
Clarification: `time_limit` is the internal time in seconds given to a particular model to train, which is dictated in part by the `time_limit` argument given during `predictor.fit()` but is not the same.
Valid keys:
stopping_metric: (str or :class:`autogluon.core.metrics.Scorer`, default=None) The metric to use for early stopping of the model. If None, model will decide.
max_memory_usage_ratio: (float, default=1.0) The ratio of memory usage relative to the default to allow before early stopping or killing the model. Values greater than 1.0 will be increasingly prone to out-of-memory errors.
max_time_limit_ratio: (float, default=1.0) The ratio of the provided time_limit to use during model `fit()`. If `time_limit=10` and `max_time_limit_ratio=0.3`, time_limit would be changed to 3. Does not alter max_time_limit or min_time_limit values.
max_time_limit: (float, default=None) Maximum amount of time to allow this model to train for (in sec). If the provided time_limit is greater than this value, it will be replaced by max_time_limit.
min_time_limit: (float, default=0) Allow this model to train for at least this long (in sec), regardless of the time limit it would otherwise be granted.
If `min_time_limit >= max_time_limit`, time_limit will be set to min_time_limit.
If `min_time_limit=None`, time_limit will be set to None and the model will have no training time restriction.
num_cpus : (int or str, default='auto')
How many CPUs to use during model fit.
If 'auto', model will decide.
num_gpus : (int or str, default='auto')
How many GPUs to use during model fit.
If 'auto', model will decide. Some models can use GPUs but don't by default due to differences in model quality.
Set to 0 to disable usage of GPUs.
ag_args_ensemble: Dictionary of hyperparameters shared by all models that control how they are ensembled, if bag mode is enabled.
Valid keys:
use_orig_features: (bool) Whether a stack model will use the original features along with the stack features to train (akin to skip-connections). If the model has no stack features (no base models), this value is ignored and the stack model will use the original features.
max_base_models: (int, default=25) Maximum number of base models whose predictions form the features input to this stacker model. If more than `max_base_models` base models are available, only the top `max_base_models` models with highest validation score are used.
max_base_models_per_type: (int, default=5) Similar to `max_base_models`. If more than `max_base_models_per_type` of any particular model type are available, only the top `max_base_models_per_type` of that type are used. This occurs before the `max_base_models` filter.
save_bag_folds: (bool, default=True)
If True, bagged models will save their fold models (the models from each individual fold of bagging). This is required to use bagged models for prediction.
If False, bagged models will not save their fold models. This means that bagged models will not be valid models during inference.
This should only be set to False when planning to call `predictor.refit_full()` or when `refit_full` is set and `set_best_to_refit_full=True`.
Particularly useful if disk usage is a concern. By not saving the fold models, bagged models will use only very small amounts of disk space during training.
In many training runs, this will reduce peak disk usage by >10x.
fold_fitting_strategy: (AbstractFoldFittingStrategy, default=auto) Whether to fit folds in parallel or in sequential order.
If parallel_local, folds will be trained in parallel with evenly distributed computing resources. This could bring a 2-4x speedup compared to SequentialLocalFoldFittingStrategy, but could consume much more memory.
If sequential_local, folds will be trained in sequential order.
If auto, the strategy will be determined by the OS and whether ray is installed or not. MacOS support for parallel_local is unstable, and may crash if enabled.
num_folds_parallel: (int or str, default='auto') Number of folds to be trained in parallel if using ParallelLocalFoldFittingStrategy. Consider lowering this value if you encounter an out-of-memory or CUDA out-of-memory issue (when training on GPU).
If 'auto', will try to train all folds in parallel.
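Illustrative example combining these per-model options (all values are hypothetical, not recommendations): `hyperparameters = {'GBM': {'ag_args': {'name_suffix': 'Custom'}, 'ag_args_fit': {'max_time_limit': 600, 'num_gpus': 0}, 'ag_args_ensemble': {'use_orig_features': False}}}`.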
feature_metadata : :class:`autogluon.tabular.FeatureMetadata` or str, default = 'infer'
The feature metadata used in various inner logic in feature preprocessing.
If 'infer', will automatically construct a FeatureMetadata object based on the properties of `train_data`.
In this case, `train_data` is input into :meth:`autogluon.tabular.FeatureMetadata.from_df` to infer `feature_metadata`.
If 'infer' incorrectly assumes the dtypes of features, consider explicitly specifying `feature_metadata`.
**kwargs :
auto_stack : bool, default = False
Whether AutoGluon should automatically utilize bagging and multi-layer stack ensembling to boost predictive accuracy.
Set this = True if you are willing to tolerate longer training times in order to maximize predictive accuracy!
Automatically sets `num_bag_folds` and `num_stack_levels` arguments based on dataset properties.
Note: Setting `num_bag_folds` and `num_stack_levels` arguments will override `auto_stack`.
Note: This can increase training time (and inference time) by up to 20x, but can greatly improve predictive performance.
num_bag_folds : int, default = None
Number of folds used for bagging of models. When `num_bag_folds = k`, training time is roughly increased by a factor of `k` (set = 0 to disable bagging).
Disabled by default (0), but we recommend values between 5-10 to maximize predictive performance.
Increasing num_bag_folds will result in models with lower bias but that are more prone to overfitting.
`num_bag_folds = 1` is an invalid value, and will raise a ValueError.
Values > 10 may produce diminishing returns, and can even harm overall results due to overfitting.
To further improve predictions, avoid increasing `num_bag_folds` much beyond 10 and instead increase `num_bag_sets`.
num_bag_sets : int, default = None
Number of repeats of kfold bagging to perform (values must be >= 1). Total number of models trained during bagging = `num_bag_folds * num_bag_sets`.
Defaults to 1 if `time_limit` is not specified, otherwise 20 (always disabled if `num_bag_folds` is not specified).
Values greater than 1 will result in superior predictive performance, especially on smaller problems and with stacking enabled (reduces overall variance).
num_stack_levels : int, default = None
Number of stacking levels to use in stack ensemble. Roughly increases model training time by factor of `num_stack_levels+1` (set = 0 to disable stack ensembling).
Disabled by default (0), but we recommend values between 1-3 to maximize predictive performance.
To prevent overfitting, `num_bag_folds >= 2` must also be set or else a ValueError will be raised.
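Example (a minimal sketch): `predictor.fit(train_data, num_bag_folds=5, num_bag_sets=1, num_stack_levels=1)` trains each model with 5-fold bagging and adds one stacking layer on top of the bagged base models.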
holdout_frac : float, default = None
Fraction of train_data to holdout as tuning data for optimizing hyperparameters (ignored unless `tuning_data = None`, ignored if `num_bag_folds != 0` unless `use_bag_holdout == True`).
Default value (if None) is selected based on the number of rows in the training data. Default values range from 0.2 at 2,500 rows to 0.01 at 250,000 rows.
Default value is doubled if `hyperparameter_tune_kwargs` is set, up to a maximum of 0.2.
Disabled if `num_bag_folds >= 2` unless `use_bag_holdout == True`.
use_bag_holdout : bool, default = False
If True, a `holdout_frac` portion of the data is held-out from model bagging.
This held-out data is only used to score models and determine weighted ensemble weights.
Enable this if there is a large gap between score_val and score_test in stack models.
Note: If `tuning_data` was specified, `tuning_data` is used as the holdout data.
Disabled if not bagging.
hyperparameter_tune_kwargs : str or dict, default = None
Hyperparameter tuning strategy and kwargs (for example, how many HPO trials to run).
If None, then hyperparameter tuning will not be performed.
Valid preset values:
'auto': Uses the 'bayesopt' preset.
'random': Performs HPO via random search using local scheduler.
'bayesopt': Performs HPO via bayesian optimization using local scheduler.
For valid dictionary keys, refer to :class:`autogluon.core.scheduler.FIFOScheduler` documentation.
The 'searcher' key is required when providing a dict.
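Example (a minimal sketch; 'searcher' is the required key, 'num_trials' is an illustrative scheduler kwarg): `predictor.fit(train_data, hyperparameter_tune_kwargs={'searcher': 'random', 'num_trials': 5})`.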
feature_prune_kwargs: dict, default = None
Performs layer-wise feature pruning via recursive feature elimination with permutation feature importance.
This fits all models in a stack layer once, discovers a pruned set of features, fits all models in the stack layer
again with the pruned set of features, and updates input feature lists for models whose validation score improved.
If None, do not perform feature pruning. If empty dictionary, perform feature pruning with default configurations.
For valid dictionary keys, refer to :class:`autogluon.core.utils.feature_selection.FeatureSelector` and
`autogluon.core.trainer.abstract_trainer.AbstractTrainer._proxy_model_feature_prune` documentation.
To force all models to work with the pruned set of features, set force_prune=True in the dictionary.
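Example (a minimal sketch): `predictor.fit(train_data, feature_prune_kwargs={'force_prune': True})` performs feature pruning with otherwise default configurations and forces all models to use the pruned feature set.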
ag_args : dict, default = None
Keyword arguments to pass to all models (i.e. common hyperparameters shared by all AutoGluon models).
See the `ag_args` argument from "Advanced functionality: Custom AutoGluon model arguments" in the `hyperparameters` argument documentation for valid values.
Identical to specifying `ag_args` parameter for all models in `hyperparameters`.
If a key in `ag_args` is already specified for a model in `hyperparameters`, it will not be altered through this argument.
ag_args_fit : dict, default = None
Keyword arguments to pass to all models.
See the `ag_args_fit` argument from "Advanced functionality: Custom AutoGluon model arguments" in the `hyperparameters` argument documentation for valid values.
Identical to specifying `ag_args_fit` parameter for all models in `hyperparameters`.
If a key in `ag_args_fit` is already specified for a model in `hyperparameters`, it will not be altered through this argument.
ag_args_ensemble : dict, default = None
Keyword arguments to pass to all models.
See the `ag_args_ensemble` argument from "Advanced functionality: Custom AutoGluon model arguments" in the `hyperparameters` argument documentation for valid values.
Identical to specifying `ag_args_ensemble` parameter for all models in `hyperparameters`.
If a key in `ag_args_ensemble` is already specified for a model in `hyperparameters`, it will not be altered through this argument.
excluded_model_types : list, default = None
Banned subset of model types to avoid training during `fit()`, even if present in `hyperparameters`.
Reference `hyperparameters` documentation for what models correspond to each value.
Useful when a particular model type such as 'KNN' or 'custom' is not desired but altering the `hyperparameters` dictionary is difficult or time-consuming.
Example: To exclude both 'KNN' and 'custom' models, specify `excluded_model_types=['KNN', 'custom']`.
refit_full : bool or str, default = False
Whether to retrain all models on all of the data (training + validation) after the normal training procedure.
This is equivalent to calling `predictor.refit_full(model=refit_full)` after fit.
If `refit_full=True`, it will be treated as `refit_full='all'`.
If `refit_full=False`, refitting will not occur.
Valid str values:
`all`: refits all models.
`best`: refits only the best model (and its ancestors if it is a stacker model).
`{model_name}`: refits only the specified model (and its ancestors if it is a stacker model).
For bagged models:
Reduces a model's inference time by collapsing bagged ensembles into a single model fit on all of the training data.
This process will typically result in a slight accuracy reduction and a large inference speedup.
The inference speedup will generally be between 10-200x faster than the original bagged ensemble model.
The inference speedup factor is equivalent to (k * n), where k is the number of folds (`num_bag_folds`) and n is the number of finished repeats (`num_bag_sets`) in the bagged ensemble.
The runtime is generally 10% or less of the original fit runtime.
The runtime can be roughly estimated as 1 / (k * n) of the original fit runtime, with k and n defined above.
For non-bagged models:
Optimizes a model's accuracy by retraining on 100% of the data without using a validation set.
Will typically result in a slight accuracy increase and no change to inference time.
The runtime will be approximately equal to the original fit runtime.
This process does not alter the original models, but instead adds additional models.
If stacker models are refit by this process, they will use the refit_full versions of the ancestor models during inference.
Models produced by this process will not have validation scores, as they use all of the data for training.
Therefore, it is up to the user to determine if the models are of sufficient quality by including test data in `predictor.leaderboard(test_data)`.
If the user does not have additional test data, they should reference the original model's score for an estimate of the performance of the refit_full model.
Warning: Be aware that utilizing refit_full models without separately verifying on test data means that the model is untested, and has no guarantee of being consistent with the original model.
The time taken by this process is not enforced by `time_limit`.
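Worked example of the speedup estimate above (illustrative numbers): with `num_bag_folds=8` and `num_bag_sets=2`, a bagged ensemble contains 8 * 2 = 16 fold models, so refitting collapses it into a single model with roughly a 16x inference speedup at roughly 1/16 of the original fit runtime.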
set_best_to_refit_full : bool, default = False
If True, will change the default model that Predictor uses for prediction when model is not specified to the refit_full version of the model that exhibited the highest validation score.
Only valid if `refit_full` is set.
keep_only_best : bool, default = False
If True, only the best model and its ancestor models are saved in the outputted `predictor`. All other models are deleted.
If you only care about deploying the most accurate predictor with the smallest file-size and no longer need any of the other trained models or functionality beyond prediction on new data, then set: `keep_only_best=True`, `save_space=True`.
This is equivalent to calling `predictor.delete_models(models_to_keep='best', dry_run=False)` directly after `fit()`.
If used with `refit_full` and `set_best_to_refit_full`, the best model will be the refit_full model, and the original bagged best model will be deleted.
`refit_full` will be automatically set to 'best' in this case to avoid training models which will be later deleted.
save_space : bool, default = False
If True, reduces the memory and disk size of predictor by deleting auxiliary model files that aren't needed for prediction on new data.
This is equivalent to calling `predictor.save_space()` directly after `fit()`.
This has NO impact on inference accuracy.
It is recommended if the only goal is to use the trained model for prediction.
Certain advanced functionality may no longer be available if `save_space=True`. Refer to `predictor.save_space()` documentation for more details.
feature_generator : :class:`autogluon.features.generators.AbstractFeatureGenerator`, default = :class:`autogluon.features.generators.AutoMLPipelineFeatureGenerator`
The feature generator used by AutoGluon to process the input data to the form sent to the models. This often includes automated feature generation and data cleaning.
It is generally recommended to keep the default feature generator unless handling an advanced use-case.
To control aspects of the default feature generation process, you can pass in an :class:`AutoMLPipelineFeatureGenerator` object constructed using some of these kwargs:
enable_numeric_features : bool, default True
Whether to keep features of 'int' and 'float' raw types.
These features are passed without alteration to the models.
Appends IdentityFeatureGenerator(infer_features_in_args=dict(valid_raw_types=['int', 'float']))) to the generator group.
enable_categorical_features : bool, default True
Whether to keep features of 'object' and 'category' raw types.
These features are processed into memory optimized 'category' features.
Appends CategoryFeatureGenerator() to the generator group.
enable_datetime_features : bool, default True
Whether to keep features of 'datetime' raw type and 'object' features identified as 'datetime_as_object' features.
These features will be converted to 'int' features representing milliseconds since epoch.
Appends DatetimeFeatureGenerator() to the generator group.
enable_text_special_features : bool, default True
Whether to use 'object' features identified as 'text' features to generate 'text_special' features such as word count, capital letter ratio, and symbol counts.
Appends TextSpecialFeatureGenerator() to the generator group.
enable_text_ngram_features : bool, default True
Whether to use 'object' features identified as 'text' features to generate 'text_ngram' features.
Appends TextNgramFeatureGenerator(vectorizer=vectorizer) to the generator group.
enable_raw_text_features : bool, default False
Whether to keep the raw text features.
Appends IdentityFeatureGenerator(infer_features_in_args=dict(required_special_types=['text'])) to the generator group.
vectorizer : CountVectorizer, default CountVectorizer(min_df=30, ngram_range=(1, 3), max_features=10000, dtype=np.uint8)
sklearn CountVectorizer object to use in TextNgramFeatureGenerator.
Only used if `enable_text_ngram_features=True`.
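Example (a minimal sketch) disabling text n-gram features while keeping the rest of the default pipeline:
>>> from autogluon.features.generators import AutoMLPipelineFeatureGenerator
>>> feature_generator = AutoMLPipelineFeatureGenerator(enable_text_ngram_features=False)
>>> predictor.fit(train_data, feature_generator=feature_generator)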
unlabeled_data : pd.DataFrame, default = None
[Experimental Parameter]
Collection of data without labels that we can use to pretrain on. This is the same schema as train_data, except
without the labels. Currently, unlabeled_data is only used for pretraining a TabTransformer model.
If you do not specify 'TRANSF' with unlabeled_data, then no pretraining will occur and unlabeled_data will be ignored!
After the pretraining step, we will finetune using the TabTransformer model as well. If TabTransformer is ensembled
with other models, like in typical AutoGluon fashion, then the output of this "pretrain/finetune" will be ensembled
with other models, which do not use the unlabeled_data. The "pretrain/finetune flow" is also known as semi-supervised learning.
The typical use case for unlabeled_data is to add signal to your model where you may not have sufficient training
data, e.g. 500 hand-labeled samples (perhaps a hard human labeling task) alongside an unlabeled dataset of thousands or millions of rows.
However, this isn't the only use case. Given enough unlabeled data (millions of rows), you may see improvements
regardless of the amount of labeled data.
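Example (a minimal sketch; `unlabeled_df` is a hypothetical DataFrame sharing the train_data schema minus the label column): `predictor.fit(train_data, hyperparameters={'TRANSF': {}}, unlabeled_data=unlabeled_df)` pretrains the Tabular Transformer on `unlabeled_df` before finetuning on `train_data`.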
verbosity : int
If specified, overrides the existing `predictor.verbosity` value.
calibrate: bool, default = False
If True and the problem_type is classification, temperature scaling will be used to calibrate the Predictor's estimated class probabilities
(which may improve metrics like log_loss) and will train a scalar parameter on the validation set.
If True and the problem_type is quantile regression, conformalization will be used to calibrate the Predictor's estimated quantiles
(which may improve the prediction interval coverage, and bagging could further improve it) and will compute a set of scalar parameters on the validation set.
Returns
-------
:class:`TabularPredictor` object. Returns self.
Examples
--------
>>> from autogluon.tabular import TabularDataset, TabularPredictor
>>> train_data = TabularDataset('https://autogluon.s3.amazonaws.com/datasets/Inc/train.csv')
>>> label = 'class'
>>> predictor = TabularPredictor(label=label).fit(train_data)
>>> test_data = TabularDataset('https://autogluon.s3.amazonaws.com/datasets/Inc/test.csv')
>>> leaderboard = predictor.leaderboard(test_data)
>>> y_test = test_data[label]
>>> test_data = test_data.drop(columns=[label])
>>> y_pred = predictor.predict(test_data)
>>> perf = predictor.evaluate_predictions(y_true=y_test, y_pred=y_pred)
To maximize predictive performance, use the following:
>>> eval_metric = 'roc_auc' # set this to the metric you ultimately care about
>>> time_limit = 3600 # set as long as you are willing to wait (in sec)
>>> predictor = TabularPredictor(label=label, eval_metric=eval_metric).fit(train_data, presets=['best_quality'], time_limit=time_limit)
"""
if self._learner.is_fit:
raise AssertionError(
'Predictor is already fit! To fit additional models, refer to `predictor.fit_extra`, or create a new `Predictor`.')
kwargs_orig = kwargs.copy()
kwargs = self._validate_fit_kwargs(kwargs)
verbosity = kwargs.get('verbosity', self.verbosity)
set_logger_verbosity(verbosity)
if presets:
if not isinstance(presets, list):
presets = [presets]
logger.log(20, f'Presets specified: {presets}')
if verbosity >= 3:
logger.log(20, '============ fit kwarg info ============')
logger.log(20, 'User Specified kwargs:')
logger.log(20, f'{pprint.pformat(kwargs_orig)}')
logger.log(20, 'Full kwargs:')
logger.log(20, f'{pprint.pformat(kwargs)}')
logger.log(20, '========================================')
holdout_frac = kwargs['holdout_frac']
num_bag_folds = kwargs['num_bag_folds']
num_bag_sets = kwargs['num_bag_sets']
num_stack_levels = kwargs['num_stack_levels']
auto_stack = kwargs['auto_stack']
feature_generator = kwargs['feature_generator']
unlabeled_data = kwargs['unlabeled_data']
ag_args = kwargs['ag_args']
ag_args_fit = kwargs['ag_args_fit']
ag_args_ensemble = kwargs['ag_args_ensemble']
excluded_model_types = kwargs['excluded_model_types']
use_bag_holdout = kwargs['use_bag_holdout']
if ag_args is None:
ag_args = {}
ag_args = self._set_hyperparameter_tune_kwargs_in_ag_args(kwargs['hyperparameter_tune_kwargs'], ag_args,
time_limit=time_limit)
feature_generator_init_kwargs = kwargs['_feature_generator_kwargs']
if feature_generator_init_kwargs is None:
feature_generator_init_kwargs = dict()
train_data, tuning_data, unlabeled_data = self._validate_fit_data(train_data=train_data,
tuning_data=tuning_data,
unlabeled_data=unlabeled_data)
if hyperparameters is None:
hyperparameters = 'default'
if isinstance(hyperparameters, str):
hyperparameters = get_hyperparameter_config(hyperparameters)
# TODO: Hyperparameters could contain non-serializable objects. Save as pkl and load on demand
# in case the hyperparameters are large in memory
self.fit_hyperparameters_ = hyperparameters
###################################
# FIXME: v0.1 This section is a hack
if 'enable_raw_text_features' not in feature_generator_init_kwargs:
if 'AG_TEXT_NN' in hyperparameters:
feature_generator_init_kwargs['enable_raw_text_features'] = True
else:
for key in hyperparameters:
if isinstance(key, int) or key == 'default':
if 'AG_TEXT_NN' in hyperparameters[key]:
feature_generator_init_kwargs['enable_raw_text_features'] = True
break
###################################
if feature_metadata is not None and isinstance(feature_metadata, str) and feature_metadata == 'infer':
feature_metadata = None
self._set_feature_generator(feature_generator=feature_generator, feature_metadata=feature_metadata,
init_kwargs=feature_generator_init_kwargs)
num_bag_folds, num_bag_sets, num_stack_levels = self._sanitize_stack_args(
num_bag_folds=num_bag_folds, num_bag_sets=num_bag_sets, num_stack_levels=num_stack_levels,
time_limit=time_limit, auto_stack=auto_stack, num_train_rows=len(train_data),
)
if holdout_frac is None:
holdout_frac = default_holdout_frac(len(train_data),
ag_args.get('hyperparameter_tune_kwargs', None) is not None)
if kwargs['_save_bag_folds'] is not None:
if use_bag_holdout and not kwargs['_save_bag_folds']:
logger.log(30,
f'WARNING: Attempted to disable saving of bagged fold models when `use_bag_holdout=True`. Forcing `save_bag_folds=True` to avoid errors.')
else:
if ag_args_ensemble is None:
ag_args_ensemble = {}
ag_args_ensemble['save_bag_folds'] = kwargs['_save_bag_folds']
if time_limit is None:
mb_mem_usage_train_data = get_approximate_df_mem_usage(train_data, sample_ratio=0.2).sum() / 1e6
num_rows_train = len(train_data)
if mb_mem_usage_train_data >= 50 or num_rows_train >= 100000:
logger.log(20,
f'Warning: Training may take a very long time because `time_limit` was not specified and `train_data` is large ({num_rows_train} samples, {round(mb_mem_usage_train_data, 2)} MB).')
logger.log(20,
f'\tConsider setting `time_limit` to ensure training finishes within an expected duration or experiment with a small portion of `train_data` to identify an ideal `presets` and `hyperparameters` configuration.')
core_kwargs = {
'ag_args': ag_args,
'ag_args_ensemble': ag_args_ensemble,
'ag_args_fit': ag_args_fit,
'excluded_model_types': excluded_model_types,
'feature_prune_kwargs': kwargs.get('feature_prune_kwargs', None)
}
self.save(silent=True) # Save predictor to disk to enable prediction and training after interrupt
self._learner.fit(X=train_data, X_val=tuning_data, X_unlabeled=unlabeled_data,
holdout_frac=holdout_frac, num_bag_folds=num_bag_folds, num_bag_sets=num_bag_sets,
num_stack_levels=num_stack_levels,
hyperparameters=hyperparameters, core_kwargs=core_kwargs, time_limit=time_limit,
verbosity=verbosity, use_bag_holdout=use_bag_holdout)
self._set_post_fit_vars()
self._post_fit(
keep_only_best=kwargs['keep_only_best'],
refit_full=kwargs['refit_full'],
set_best_to_refit_full=kwargs['set_best_to_refit_full'],
save_space=kwargs['save_space'],
calibrate=kwargs['calibrate']
)
self.save()
return self
def _post_fit(self, keep_only_best=False, refit_full=False, set_best_to_refit_full=False, save_space=False,
calibrate=False):
if refit_full is True:
if keep_only_best is True:
if set_best_to_refit_full is True:
refit_full = 'best'
else:
logger.warning(
f'refit_full was set to {refit_full}, but keep_only_best=True and set_best_to_refit_full=False. Disabling refit_full to avoid training models which would be automatically deleted.')
refit_full = False
else:
refit_full = 'all'
if refit_full is not False:
trainer_model_best = self._trainer.get_model_best()
self.refit_full(model=refit_full)
if set_best_to_refit_full:
if trainer_model_best in self._trainer.model_full_dict.keys():
self._trainer.model_best = self._trainer.model_full_dict[trainer_model_best]
# Note: model_best will be overwritten if additional training is done with new models, since model_best will have validation score of None and any new model will have a better validation score.
# This has the side-effect of having the possibility of model_best being overwritten by a worse model than the original model_best.
self._trainer.save()
else:
logger.warning(
f'Best model ({trainer_model_best}) is not present in refit_full dictionary. Training may have failed on the refit model. AutoGluon will default to using {trainer_model_best} for predictions.')
if keep_only_best:
self.delete_models(models_to_keep='best', dry_run=False)
if calibrate:
if self.problem_type in PROBLEM_TYPES_CLASSIFICATION + [QUANTILE]:
self._calibrate_model()
else:
logger.log(30, 'WARNING: calibrate is only applicable to classification or quantile regression problems')
if save_space:
self.save_space()
def _calibrate_model(self, model_name: str = None, lr: float = 0.01, max_iter: int = 1000, init_val: float = 1.0):
"""
Applies temperature scaling to the AutoGluon model. Applies the
inverse softmax to the predicted probabilities, then trains a temperature scalar
on validation data to minimize the negative log likelihood. The resulting logits
are divided by the temperature scalar and softmaxed to return calibrated
predicted probabilities.
Parameters
----------
model_name : str, default = None
Model name to tune temperature scaling on. If None,
will tune the best model only. The best model is chosen by validation score.
lr : float, default = 0.01
The learning rate for the temperature scaling algorithm.
max_iter : int, default = 1000
Number of iterations the optimizer should take for
tuning the temperature scaler.
init_val : float, default = 1.0
The initial value for the temperature scalar term.
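A minimal sketch of the transform being tuned (illustrative only, not the library's exact implementation; assumes `y_val_probs` is an (n_rows, n_classes) array):
>>> import numpy as np
>>> logits = np.log(np.clip(y_val_probs, 1e-12, 1.0))  # inverse softmax up to an additive constant
>>> calibrated = np.exp(logits / temperature_scalar)
>>> calibrated = calibrated / calibrated.sum(axis=1, keepdims=True)  # re-normalize to probabilities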
"""
# TODO: Note that temperature scaling is known to worsen calibration in the face of shifted test data.
if model_name is None:
model_name = self._trainer.get_model_best()
if self._trainer.bagged_mode:
y_val_probs = self.get_oof_pred_proba(model_name).to_numpy()
y_val = self._trainer.load_y().to_numpy()
else:
X_val = self._trainer.load_X_val()
y_val_probs = self._trainer.predict_proba(X_val, model_name)
y_val = self._trainer.load_y_val().to_numpy()
if self.problem_type == BINARY:
y_val_probs = LabelCleanerMulticlassToBinary.convert_binary_proba_to_multiclass_proba(y_val_probs)
model = self._trainer.load_model(model_name=model_name)
if self.problem_type == QUANTILE:
logger.log(15, f'Conformity scores being computed to calibrate model: {model_name}')
conformalize = compute_conformity_score(y_val_pred=y_val_probs, y_val=y_val,
quantile_levels=self.quantile_levels)
model.conformalize = conformalize
else:
logger.log(15, f'Temperature scaling term being tuned for model: {model_name}')
temp_scalar = tune_temperature_scaling(y_val_probs=y_val_probs, y_val=y_val,
init_val=init_val, max_iter=max_iter, lr=lr)
logger.log(15, f'Temperature term found is: {temp_scalar}')
model.temperature_scalar = temp_scalar
model.save()
def fit_extra(self, hyperparameters, time_limit=None, base_model_names=None, **kwargs):
"""
Fits additional models after the original :meth:`TabularPredictor.fit` call.
The original train_data and tuning_data will be used to train the models.
Parameters
----------
hyperparameters : str or dict
Refer to argument documentation in :meth:`TabularPredictor.fit`.
If `base_model_names` is specified and hyperparameters is using the level-based key notation,
the key of the level which directly uses the base models should be 1. The level in the hyperparameters
dictionary is relative, not absolute.
time_limit : int, default = None
Refer to argument documentation in :meth:`TabularPredictor.fit`.
base_model_names : list, default = None
The names of the models to use as base models for this fit call.
Base models will provide their out-of-fold predictions as additional features to the models in `hyperparameters`.
If specified, all models trained will be stack ensembles.
If None, models will be trained as if they were specified in :meth:`TabularPredictor.fit`, without depending on existing models.
Only valid if bagging is enabled.
**kwargs :
Refer to kwargs documentation in :meth:`TabularPredictor.fit`.
Note that the following kwargs are not available in `fit_extra` as they cannot be changed from their values set in `fit()`:
[`holdout_frac`, `num_bag_folds`, `auto_stack`, `feature_generator`, `unlabeled_data`]
pseudo_data : pd.DataFrame, default = None
Data that has been self labeled by Autogluon model and will be incorporated into training during 'fit_extra'
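Examples
--------
A minimal usage sketch (hyperparameter values are illustrative; assumes `predictor` was already fit):
>>> predictor.fit_extra(hyperparameters={'GBM': {}, 'CAT': {}}, time_limit=600)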
"""
self._assert_is_fit('fit_extra')
time_start = time.time()
kwargs_orig = kwargs.copy()
kwargs = self._validate_fit_extra_kwargs(kwargs)
verbosity = kwargs.get('verbosity', self.verbosity)
set_logger_verbosity(verbosity)
if verbosity >= 3:
logger.log(20, '============ fit kwarg info ============')
logger.log(20, 'User Specified kwargs:')
logger.log(20, f'{pprint.pformat(kwargs_orig)}')
logger.log(20, 'Full kwargs:')
logger.log(20, f'{pprint.pformat(kwargs)}')
logger.log(20, '========================================')
# TODO: Allow disable aux (default to disabled)
# TODO: num_bag_sets
# num_bag_sets = kwargs['num_bag_sets']
num_stack_levels = kwargs['num_stack_levels']
# save_bag_folds = kwargs['save_bag_folds'] # TODO: Enable
ag_args = kwargs['ag_args']
ag_args_fit = kwargs['ag_args_fit']
ag_args_ensemble = kwargs['ag_args_ensemble']
excluded_model_types = kwargs['excluded_model_types']
pseudo_data = kwargs.get('pseudo_data', None)
# TODO: Since data preprocessor is fitted on original train_data it cannot account for if
# labeled pseudo data has new labels unseen in the original train. Probably need to refit
# data preprocessor if this is the case.
if pseudo_data is not None:
if self.label not in pseudo_data.columns:
raise ValueError('\'pseudo_data\' does not contain the label column.')
if self.sample_weight is not None:
raise ValueError('Applying \'sample_weight\' while calling \'fit_pseudolabel\' is not supported')
X_pseudo = pseudo_data.drop(columns=[self.label])
y_pseudo_og = pseudo_data[self.label]
X_pseudo = self._learner.transform_features(X_pseudo)
y_pseudo = self._learner.label_cleaner.transform(y_pseudo_og)
if np.isnan(y_pseudo.unique()).any():
raise Exception('NaN was found in the label column for pseudo labeled data. '
'Please ensure there are no NaN values in the target column.')
else:
X_pseudo = None
y_pseudo = None
if ag_args is None:
ag_args = {}
ag_args = self._set_hyperparameter_tune_kwargs_in_ag_args(kwargs['hyperparameter_tune_kwargs'], ag_args,
time_limit=time_limit)
fit_new_weighted_ensemble = False # TODO: Add as option
aux_kwargs = None # TODO: Add as option
if isinstance(hyperparameters, str):
hyperparameters = get_hyperparameter_config(hyperparameters)
if num_stack_levels is None:
hyperparameter_keys = list(hyperparameters.keys())
highest_level = 1
for key in hyperparameter_keys:
if isinstance(key, int):
highest_level = max(key, highest_level)
num_stack_levels = highest_level
# TODO: make core_kwargs a kwargs argument to predictor.fit, add aux_kwargs to predictor.fit
core_kwargs = {'ag_args': ag_args, 'ag_args_ensemble': ag_args_ensemble, 'ag_args_fit': ag_args_fit,
'excluded_model_types': excluded_model_types}
if X_pseudo is not None and y_pseudo is not None:
core_kwargs['X_pseudo'] = X_pseudo
core_kwargs['y_pseudo'] = y_pseudo
# TODO: Add special error message if called and training/val data was not cached.
X, y, X_val, y_val = self._trainer.load_data()
if y_pseudo is not None and self.problem_type in PROBLEM_TYPES_CLASSIFICATION:
y_og = self._learner.label_cleaner.inverse_transform(y)
y_og_classes = y_og.unique()
y_pseudo_classes = y_pseudo_og.unique()
matching_classes = np.in1d(y_pseudo_classes, y_og_classes)
if not matching_classes.all():
raise Exception(f'Pseudo training data contains classes not in original train data: {y_pseudo_classes[~matching_classes]}')
name_suffix = kwargs.get('name_suffix', '')
fit_models = self._trainer.train_multi_levels(
X=X, y=y, hyperparameters=hyperparameters, X_val=X_val, y_val=y_val,
base_model_names=base_model_names, time_limit=time_limit, relative_stack=True, level_end=num_stack_levels,
core_kwargs=core_kwargs, aux_kwargs=aux_kwargs, name_suffix=name_suffix
)
if time_limit is not None:
time_limit = time_limit - (time.time() - time_start)
if fit_new_weighted_ensemble:
if time_limit is not None:
time_limit_weighted = max(time_limit, 60)
else:
time_limit_weighted = None
fit_models += self.fit_weighted_ensemble(time_limit=time_limit_weighted)
self._post_fit(
keep_only_best=kwargs['keep_only_best'],
refit_full=kwargs['refit_full'],
set_best_to_refit_full=kwargs['set_best_to_refit_full'],
save_space=kwargs['save_space'],
calibrate=kwargs['calibrate']
)
self.save()
return self
def _get_all_fit_extra_args(self):
ret = list(self._fit_extra_kwargs_dict().keys()) + list(inspect.signature(self.fit_extra).parameters.keys())
ret.remove('kwargs')
return ret
def _fit_weighted_ensemble_pseudo(self):
"""
Fits a weighted ensemble on top of the models trained with pseudo labeling. If the new
weighted ensemble model is the best model, sets `model_best` in the trainer to the
weighted ensemble model.
"""
logger.log(15, 'Fitting weighted ensemble using top models')
weighted_ensemble_model_name = self.fit_weighted_ensemble()[0]
# TODO: This is a hack! self.predict_proba does not update to use the weighted ensemble
# if it's the best model.
# TODO: A PL suffix should also be added to the weighted ensemble model name to notify
# users that it is a model trained with PL models if they are indeed ensembled
model_best_name = self._trainer.leaderboard().iloc[0]['model']
if model_best_name == weighted_ensemble_model_name:
self._trainer.model_best = model_best_name
self._trainer.save()
logger.log(15, 'Weighted ensemble was the best model for current iteration of pseudo labeling')
else:
logger.log(15, 'Weighted ensemble was not the best model for current iteration of pseudo labeling')
def _run_pseudolabeling(self, unlabeled_data: pd.DataFrame, max_iter: int,
return_pred_prob: bool = False, use_ensemble: bool = False,
fit_ensemble: bool = False, fit_ensemble_every_iter: bool = False,
**kwargs):
"""
Runs the pseudolabeling algorithm using the same hyperparameters and fit settings
used for the original model unless otherwise specified by the user. This is an internal function that iteratively
self-labels unlabeled test data, then incorporates all self-labeled data above a confidence threshold into training.
It keeps incorporating self-labeled data into training until the validation score no longer improves.
Parameters:
-----------
unlabeled_data: Extra unlabeled data (could be the test data) to assign pseudolabels to
and incorporate as extra training data.
max_iter: int, default = 5
Maximum allowed number of iterations, where in each iteration, the data are pseudolabeled
by the current predictor and the predictor is refit including the pseudolabeled data in its training set.
return_pred_prob: bool, default = False
Transductive learning setting; will return predictive probabilities of unlabeled_data.
use_ensemble: bool, default = False
If True, will use the ensemble pseudo labeling algorithm; if False, will use the best-model
pseudo labeling method.
fit_ensemble: bool, default = False
If True will fit weighted ensemble on final best models. Fitting weighted ensemble will be done after fitting
of models is completed unless otherwise specified. If False will not fit weighted ensemble on final best
models.
fit_ensemble_every_iter: bool, default = False
If True will fit weighted ensemble model using combination of best models
for every iteration of pseudo label algorithm. If False and fit_ensemble
is True, will just do it at the very end of training pseudo labeled models.
Returns:
--------
self: TabularPredictor
"""
previous_score = self.info()['best_model_score_val']
y_pseudo_og = pd.Series()
if return_pred_prob:
if self.problem_type is REGRESSION:
y_pred_proba_og = pd.Series()
else:
y_pred_proba_og = pd.DataFrame()
X_test = unlabeled_data.copy()
for i in range(max_iter):
if len(X_test) == 0:
logger.log(20, f'No more unlabeled data to pseudolabel. Done with pseudolabeling...')
break
iter_print = str(i + 1)
logger.log(20, f'Beginning iteration {iter_print} of pseudolabeling out of max: {max_iter}')
if use_ensemble:
if self.problem_type in PROBLEM_TYPES_CLASSIFICATION:
test_pseudo_idxes_true, y_pred_proba, y_pred = filter_ensemble_pseudo(predictor=self,
unlabeled_data=X_test)
else:
test_pseudo_idxes_true, y_pred = filter_ensemble_pseudo(predictor=self, unlabeled_data=X_test)
y_pred_proba = y_pred.copy()
else:
y_pred_proba = self.predict_proba(data=X_test, as_multiclass=True)
y_pred = get_pred_from_proba_df(y_pred_proba, problem_type=self.problem_type)
test_pseudo_idxes_true = filter_pseudo(y_pred_proba_og=y_pred_proba, problem_type=self.problem_type)
if return_pred_prob:
if i == 0:
y_pred_proba_og = y_pred_proba
else:
y_pred_proba_og.loc[test_pseudo_idxes_true.index] = y_pred_proba.loc[test_pseudo_idxes_true.index]
if len(test_pseudo_idxes_true) < 1:
logger.log(20,
f'Could not confidently assign pseudolabels for any of the provided rows in iteration: {iter_print}. Done with pseudolabeling...')
break
else:
logger.log(20,
f'Pseudolabeling algorithm confidently assigned pseudolabels to: {len(test_pseudo_idxes_true)} rows of data '
f'on iteration: {iter_print}. Adding to train data')
test_pseudo_idxes = pd.Series(data=False, index=y_pred_proba.index)
test_pseudo_idxes[test_pseudo_idxes_true.index] = True
y_pseudo_og = y_pseudo_og.append(y_pred.loc[test_pseudo_idxes_true.index], verify_integrity=True)
pseudo_data = unlabeled_data.loc[y_pseudo_og.index]
pseudo_data[self.label] = y_pseudo_og
self.fit_extra(pseudo_data=pseudo_data, name_suffix=PSEUDO_MODEL_SUFFIX.format(iter=(i + 1)),
**kwargs)
if fit_ensemble and fit_ensemble_every_iter:
self._fit_weighted_ensemble_pseudo()
current_score = self.info()['best_model_score_val']
logger.log(20,
f'Pseudolabeling algorithm changed validation score from: {previous_score}, to: {current_score}'
f' using evaluation metric: {self.eval_metric.name}')
if previous_score >= current_score:
break
else:
# Cut down X_test to not include pseudo labeled data
X_test = X_test.loc[test_pseudo_idxes[~test_pseudo_idxes].index]
previous_score = current_score
if fit_ensemble and not fit_ensemble_every_iter:
self._fit_weighted_ensemble_pseudo()
y_pred_proba_og = self.predict_proba(unlabeled_data)
if return_pred_prob:
return self, y_pred_proba_og
else:
return self
def fit_pseudolabel(self, pseudo_data: pd.DataFrame, max_iter: int = 5, return_pred_prob: bool = False,
use_ensemble: bool = False, fit_ensemble: bool = False, fit_ensemble_every_iter: bool = False,
**kwargs):
"""
If 'pseudo_data' is labeled then incorporates all of the pseudo_data into train_data for
newly fit models. If 'pseudo_data' is unlabeled then 'fit_pseudolabel' will self-label the
data and will augment the original training data by adding all the self-labeled
data that meets a criterion (for example, all rows with predictive probability above 95%). If
the predictor is fit then this will call fit_extra with the added training data; if the predictor
is not fit then it will fit a model on train_data first and then run.
Parameters
----------
pseudo_data : str or :class:`TabularDataset` or :class:`pd.DataFrame`
Extra data to incorporate into training. Pre-labeled test data allowed. If no labels
then pseudolabeling algorithm will predict and filter out which rows to incorporate into
training
max_iter: int, default = 5
Maximum iterations of pseudolabeling allowed
return_pred_prob: bool, default = False
Returns held-out predictive probabilities from pseudo-labeling. If pseudo_data is labeled then
returns the model's predictive probabilities.
use_ensemble: bool, default = False
If True will use ensemble pseudo labeling algorithm. If False will just use best model
for pseudo labeling algorithm.
fit_ensemble: bool, default = False
If True, will fit a weighted ensemble model using a combination of the best models.
Fitting the weighted ensemble will be done after fitting has
been completed unless otherwise specified. If False, will not fit a weighted ensemble
over models trained with pseudo labeling and models trained without it.
fit_ensemble_every_iter: bool, default = False
If True fits weighted ensemble model for every iteration of pseudo labeling algorithm. If False
and fit_ensemble is True will fit after all pseudo labeling training is done.
kwargs: dict
If predictor is not already fit, then kwargs are for the functions 'fit' and 'fit_extra':
Refer to parameters documentation in :meth:`TabularPredictor.fit`.
Refer to parameters documentation in :meth:`TabularPredictor.fit_extra`.
If predictor is fit kwargs are for 'fit_extra':
Refer to parameters documentation in :meth:`TabularPredictor.fit_extra`.
Returns
-------
self : TabularPredictor
Returns self, which is a Python class of TabularPredictor
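Examples
--------
A minimal usage sketch (assumes `predictor` is already fit and `unlabeled_test_data` follows the train_data schema without the label column):
>>> predictor, y_pred_proba = predictor.fit_pseudolabel(pseudo_data=unlabeled_test_data, return_pred_prob=True)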
"""
if len(pseudo_data) < 1:
raise Exception('No pseudo data given')
if not self._learner.is_fit:
if 'train_data' not in kwargs.keys():
raise Exception('Autogluon is required to be fit or given \'train_data\' in order to run \'fit_pseudolabel\'.'
' Autogluon is not fit and \'train_data\' was not given')
logger.log(20,
f'Predictor not fit prior to pseudolabeling. Fitting now...')
self.fit(**kwargs)
if self.problem_type is MULTICLASS and self.eval_metric.name != 'accuracy':
logger.warning('AutoGluon has detected the problem type as \'multiclass\' and '
f'eval_metric is {self.eval_metric.name}, we recommend using '
f'fit_pseudolabel when eval metric is \'accuracy\'')
is_labeled = self.label in pseudo_data.columns
hyperparameters = kwargs.get('hyperparameters', None)
if hyperparameters is None:
if self._learner.is_fit:
hyperparameters = self.fit_hyperparameters_
elif isinstance(hyperparameters, str):
hyperparameters = get_hyperparameter_config(hyperparameters)
kwargs['hyperparameters'] = hyperparameters
fit_extra_args = self._get_all_fit_extra_args()
fit_extra_kwargs = {key: value for key, value in kwargs.items() if key in fit_extra_args}
if is_labeled:
logger.log(20, "Fitting predictor using the provided pseudolabeled examples as extra training data...")
self.fit_extra(pseudo_data=pseudo_data, name_suffix=PSEUDO_MODEL_SUFFIX.format(iter='')[:-1],
**fit_extra_kwargs)
if fit_ensemble:
logger.log(15, 'Fitting weighted ensemble model using best models')
self.fit_weighted_ensemble()
if return_pred_prob:
y_pred_proba = self.predict_proba(pseudo_data)
return self, y_pred_proba
else:
return self
else:
logger.log(20, 'Given pseudo_data for pseudo labeling did not contain labels. '
'AutoGluon will assign pseudo labels to the data and use it as extra training data...')
return self._run_pseudolabeling(unlabeled_data=pseudo_data, max_iter=max_iter,
return_pred_prob=return_pred_prob, use_ensemble=use_ensemble,
fit_ensemble=fit_ensemble, fit_ensemble_every_iter=fit_ensemble_every_iter,
**fit_extra_kwargs)
def predict(self, data, model=None, as_pandas=True):
"""
Use trained models to produce predictions of `label` column values for new data.
Parameters
----------
data : str or :class:`TabularDataset` or :class:`pd.DataFrame`
The data to make predictions for. Should contain same column names as training Dataset and follow same format
(may contain extra columns that won't be used by Predictor, including the label-column itself).
If str is passed, `data` will be loaded using the str value as the file path.
model : str (optional)
The name of the model to get predictions from. Defaults to None, which uses the highest scoring model on the validation set.
Valid models are listed in this `predictor` by calling `predictor.get_model_names()`
as_pandas : bool, default = True
Whether to return the output as a :class:`pd.Series` (True) or :class:`np.ndarray` (False).
Returns
-------
Array of predictions, one corresponding to each row in given dataset. Either :class:`np.ndarray` or :class:`pd.Series` depending on `as_pandas` argument.
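Examples
--------
A minimal usage sketch (assumes `predictor` is already fit and `test_data` follows the training schema; the model name is illustrative, see `predictor.get_model_names()`):
>>> y_pred = predictor.predict(test_data)
>>> y_pred_single = predictor.predict(test_data, model='WeightedEnsemble_L2')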
"""
self._assert_is_fit('predict')
data = self.__get_dataset(data)
return self._learner.predict(X=data, model=model, as_pandas=as_pandas)
def predict_proba(self, data, model=None, as_pandas=True, as_multiclass=True):
"""
Use trained models to produce predicted class probabilities rather than class-labels (if task is classification).
If `predictor.problem_type` is regression, this functions identically to `predict`, returning the same output.
Parameters
----------
data : str or :class:`TabularDataset` or :class:`pd.DataFrame`
The data to make predictions for. Should contain same column names as training dataset and follow same format
(may contain extra columns that won't be used by Predictor, including the label-column itself).
If str is passed, `data` will be loaded using the str value as the file path.
model : str (optional)
The name of the model to get prediction probabilities from. Defaults to None, which uses the highest scoring model on the validation set.
Valid models are listed in this `predictor` by calling `predictor.get_model_names()`.
as_pandas : bool, default = True
Whether to return the output as a pandas object (True) or numpy array (False).
Pandas object is a DataFrame if this is a multiclass problem or `as_multiclass=True`, otherwise it is a Series.
If the output is a DataFrame, the column order will be equivalent to `predictor.class_labels`.
as_multiclass : bool, default = True
Whether to return binary classification probabilities as if they were for multiclass classification.
Output will contain two columns, and if `as_pandas=True`, the column names will correspond to the binary class labels.
The columns will be the same order as `predictor.class_labels`.
If False, output will contain only 1 column for the positive class (get positive_class name via `predictor.positive_class`).
Only impacts output for binary classification problems.
Returns
-------
Array of predicted class-probabilities, corresponding to each row in the given data.
May be a :class:`np.ndarray` or :class:`pd.DataFrame` / :class:`pd.Series` depending on `as_pandas` and `as_multiclass` arguments and the type of prediction problem.
For binary classification problems, the output contains for each datapoint the predicted probabilities of the negative and positive classes, unless you specify `as_multiclass=False`.
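Examples
--------
A minimal usage sketch, assuming a fitted binary-classification `predictor` and a hypothetical 'test.csv':
>>> y_pred_proba = predictor.predict_proba('test.csv')  # DataFrame with one column per class
>>> y_pred_proba_pos = predictor.predict_proba('test.csv', as_multiclass=False)  # Series of positive-class probabilities only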
"""
self._assert_is_fit('predict_proba')
data = self.__get_dataset(data)
return self._learner.predict_proba(X=data, model=model, as_pandas=as_pandas, as_multiclass=as_multiclass)
def evaluate(self, data, model=None, silent=False, auxiliary_metrics=True, detailed_report=False) -> dict:
"""
Report the predictive performance evaluated over a given dataset.
This is basically a shortcut for: `pred_proba = predict_proba(data); evaluate_predictions(data[label], pred_proba)`.
Parameters
----------
data : str or :class:`TabularDataset` or :class:`pd.DataFrame`
This dataset must also contain the label column, with the same column-name as previously specified.
If str is passed, `data` will be loaded using the str value as the file path.
model : str (optional)
The name of the model to get prediction probabilities from. Defaults to None, which uses the highest scoring model on the validation set.
Valid models are listed in this `predictor` by calling `predictor.get_model_names()`.
silent : bool, default = False
If False, performance results are printed.
auxiliary_metrics: bool, default = True
Should we compute other (`problem_type` specific) metrics in addition to the default metric?
detailed_report : bool, default = False
Should we compute more detailed versions of the `auxiliary_metrics`? (requires `auxiliary_metrics = True`)
Returns
-------
Returns dict where keys = metrics, values = performance along each metric. To get the `eval_metric` score, do `output[predictor.eval_metric.name]`
NOTE: Metrics scores always show in higher is better form.
This means that metrics such as log_loss and root_mean_squared_error will have their signs FLIPPED, and values will be negative.
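Examples
--------
A minimal usage sketch, assuming a fitted `predictor` and a hypothetical 'test.csv' that contains the label column:
>>> perf = predictor.evaluate('test.csv', silent=True)
>>> perf[predictor.eval_metric.name]  # score on the eval_metric (in higher-is-better form)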
"""
self._assert_is_fit('evaluate')
data = self.__get_dataset(data)
y_pred_proba = self.predict_proba(data=data, model=model)
return self.evaluate_predictions(y_true=data[self.label], y_pred=y_pred_proba, silent=silent,
auxiliary_metrics=auxiliary_metrics, detailed_report=detailed_report)
def evaluate_predictions(self, y_true, y_pred, silent=False, auxiliary_metrics=True, detailed_report=False) -> dict:
"""
Evaluate the provided prediction probabilities against ground truth labels.
Evaluation is based on the `eval_metric` previously specified in init, or default metrics if none was specified.
Parameters
----------
y_true : :class:`np.ndarray` or :class:`pd.Series`
The ordered collection of ground-truth labels.
y_pred : :class:`pd.Series` or :class:`pd.DataFrame`
The ordered collection of prediction probabilities or predictions.
Obtainable via the output of `predictor.predict_proba`.
Caution: For certain types of `eval_metric` (such as 'roc_auc'), `y_pred` must be predicted-probabilities rather than predicted labels.
silent : bool, default = False
If False, performance results are printed.
auxiliary_metrics: bool, default = True
Should we compute other (`problem_type` specific) metrics in addition to the default metric?
detailed_report : bool, default = False
Should we compute more detailed versions of the `auxiliary_metrics`? (requires `auxiliary_metrics = True`)
Returns
-------
Returns dict where keys = metrics, values = performance along each metric.
NOTE: Metrics scores always show in higher is better form.
This means that metrics such as log_loss and root_mean_squared_error will have their signs FLIPPED, and values will be negative.
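Examples
--------
A minimal usage sketch, assuming a fitted `predictor` and a test DataFrame `test_data` that contains the label column:
>>> y_pred_proba = predictor.predict_proba(test_data)
>>> perf = predictor.evaluate_predictions(y_true=test_data[predictor.label], y_pred=y_pred_proba, silent=True)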
"""
return self._learner.evaluate_predictions(y_true=y_true, y_pred=y_pred, silent=silent,
auxiliary_metrics=auxiliary_metrics, detailed_report=detailed_report)
def leaderboard(self, data=None, extra_info=False, extra_metrics=None, only_pareto_frontier=False, silent=False):
"""
Output summary of information about models produced during `fit()` as a :class:`pd.DataFrame`.
Includes information on test and validation scores for all models, model training times, inference times, and stack levels.
Output DataFrame columns include:
'model': The name of the model.
'score_val': The validation score of the model on the 'eval_metric'.
NOTE: Metrics scores always show in higher is better form.
This means that metrics such as log_loss and root_mean_squared_error will have their signs FLIPPED, and values will be negative.
This is necessary to avoid the user needing to know the metric to understand if higher is better when looking at leaderboard.
'pred_time_val': The inference time required to compute predictions on the validation data end-to-end.
Equivalent to the sum of all 'pred_time_val_marginal' values for the model and all of its base models.
'fit_time': The fit time required to train the model end-to-end (Including base models if the model is a stack ensemble).
Equivalent to the sum of all 'fit_time_marginal' values for the model and all of its base models.
'pred_time_val_marginal': The inference time required to compute predictions on the validation data (Ignoring inference times for base models).
Note that this ignores the time required to load the model into memory when bagging is disabled.
'fit_time_marginal': The fit time required to train the model (Ignoring base models).
'stack_level': The stack level of the model.
A model with stack level N can take any set of models with stack level less than N as input, with stack level 1 models having no model inputs.
'can_infer': If model is able to perform inference on new data. If False, then the model either was not saved, was deleted, or an ancestor of the model cannot infer.
`can_infer` is often False when `save_bag_folds=False` was specified in initial `fit()`.
'fit_order': The order in which models were fit. The first model fit has `fit_order=1`, and the Nth model fit has `fit_order=N`. The order corresponds to the first child model fit in the case of bagged ensembles.
Parameters
----------
data : str or :class:`TabularDataset` or :class:`pd.DataFrame` (optional)
This Dataset must also contain the label-column with the same column-name as specified during fit().
If specified, then the leaderboard returned will contain additional columns 'score_test', 'pred_time_test', and 'pred_time_test_marginal'.
'score_test': The score of the model on the 'eval_metric' for the data provided.
NOTE: Metrics scores always show in higher is better form.
This means that metrics such as log_loss and root_mean_squared_error will have their signs FLIPPED, and values will be negative.
This is necessary to avoid the user needing to know the metric to understand if higher is better when looking at leaderboard.
'pred_time_test': The true end-to-end wall-clock inference time of the model for the data provided.
Equivalent to the sum of all 'pred_time_test_marginal' values for the model and all of its base models.
'pred_time_test_marginal': The inference time of the model for the data provided, minus the inference time for the model's base models, if it has any.
Note that this ignores the time required to load the model into memory when bagging is disabled.
If str is passed, `data` will be loaded using the str value as the file path.
extra_info : bool, default = False
If `True`, will return extra columns with advanced info.
This requires additional computation as advanced info data is calculated on demand.
Additional output columns when `extra_info=True` include:
'num_features': Number of input features used by the model.
Some models may ignore certain features in the preprocessed data.
'num_models': Number of models that actually make up this "model" object.
For non-bagged models, this is 1. For bagged models, this is equal to the number of child models (models trained on bagged folds) the bagged ensemble contains.
'num_models_w_ancestors': Equivalent to the sum of 'num_models' values for the model and its ancestors (see below).
'memory_size': The amount of memory in bytes the model requires when persisted in memory. This is not equivalent to the amount of memory the model may use during inference.
For bagged models, this is the sum of the 'memory_size' of all child models.
'memory_size_w_ancestors': Equivalent to the sum of 'memory_size' values for the model and its ancestors.
This is the amount of memory required to avoid loading any models in-between inference calls to get predictions from this model.
For online-inference, this is critical. It is important that the machine performing online inference has memory more than twice this value to avoid loading models for every call to inference by persisting models in memory.
'memory_size_min': The amount of memory in bytes the model minimally requires to perform inference.
For non-bagged models, this is equivalent to 'memory_size'.
For bagged models, this is equivalent to the largest child model's 'memory_size_min'.
To minimize memory usage, child models can be loaded and un-persisted one by one to infer. This is the default behavior if a bagged model was not already persisted in memory prior to inference.
'memory_size_min_w_ancestors': Equivalent to the max of the 'memory_size_min' values for the model and its ancestors.
This is the minimum required memory to infer with the model by only loading one model at a time, as each of its ancestors will also have to be loaded into memory.
For offline-inference where latency is not a concern, this should be used to determine the required memory for a machine if 'memory_size_w_ancestors' is too large.
'num_ancestors': Number of ancestor models for the given model.
'num_descendants': Number of descendant models for the given model.
'model_type': The type of the given model.
If the model is an ensemble type, 'child_model_type' will indicate the inner model type. A stack ensemble of bagged LightGBM models would have 'StackerEnsembleModel' as its model type.
'child_model_type': The child model type. None if the model is not an ensemble. A stack ensemble of bagged LightGBM models would have 'LGBModel' as its child type.
child models are models which are used as a group to generate a given bagged ensemble model's predictions. These are the models trained on each fold of a bagged ensemble.
For 10-fold bagging, the bagged ensemble model would have 10 child models.
For 10-fold bagging with 3 repeats, the bagged ensemble model would have 30 child models.
Note that child models are distinct from ancestors and descendants.
'hyperparameters': The hyperparameter values specified for the model.
All hyperparameters that do not appear in this dict remained at their default values.
'hyperparameters_fit': The hyperparameters set by the model during fit.
This overrides the 'hyperparameters' value for a particular key if present in 'hyperparameters_fit' to determine the fit model's final hyperparameters.
This is most commonly set for hyperparameters that indicate model training iterations or epochs, as early stopping can find a different value from what 'hyperparameters' indicated.
In these cases, the provided hyperparameter in 'hyperparameters' is used as a maximum for the model, but the model is still able to early stop at a smaller value during training to achieve a better validation score or to satisfy time constraints.
For example, if a NN model was given `epochs=500` as a hyperparameter, but found during training that `epochs=60` resulted in the optimal validation score, it would use `epochs=60` and `hyperparameters_fit={'epochs': 60}` would be set.
'ag_args_fit': Special AutoGluon arguments that influence model fit.
See the documentation of the `hyperparameters` argument in `TabularPredictor.fit()` for more information.
'features': List of feature names used by the model.
'child_hyperparameters': Equivalent to 'hyperparameters', but for the model's children.
'child_hyperparameters_fit': Equivalent to 'hyperparameters_fit', but for the model's children.
'child_ag_args_fit': Equivalent to 'ag_args_fit', but for the model's children.
'ancestors': The model's ancestors. Ancestor models are the models which are required to make predictions during the construction of the model's input features.
If A is an ancestor of B, then B is a descendant of A.
If a model's ancestor is deleted, the model is no longer able to infer on new data, and its 'can_infer' value will be False.
A model can only have ancestor models whose 'stack_level' are lower than itself.
'stack_level'=1 models have no ancestors.
'descendants': The model's descendants. Descendant models are the models which require this model to make predictions during the construction of their input features.
If A is a descendant of B, then B is an ancestor of A.
If this model is deleted, then all descendant models will no longer be able to infer on new data, and their 'can_infer' values will be False.
A model can only have descendant models whose 'stack_level' are higher than itself.
extra_metrics : list, default = None
A list of metrics to calculate scores for and include in the output DataFrame.
Only valid when `data` is specified. The scores refer to the scores on `data` (same data as used to calculate the `score_test` column).
This list can contain any values which would also be valid for `eval_metric` in predictor init.
For example, `extra_metrics=['accuracy', 'roc_auc', 'log_loss']` would be valid in binary classification.
This example would return 3 additional columns in the output DataFrame, whose column names match the names of the metrics.
Passing `extra_metrics=[predictor.eval_metric]` would return an extra column in the name of the eval metric that has identical values to `score_test`.
This also works with custom metrics. If passing an object instead of a string, the column name will be equal to the `.name` attribute of the object.
NOTE: Metrics scores always show in higher is better form.
This means that metrics such as log_loss and root_mean_squared_error will have their signs FLIPPED, and values will be negative.
This is necessary to avoid the user needing to know the metric to understand if higher is better when looking at leaderboard.
only_pareto_frontier : bool, default = False
If `True`, only return model information of models in the Pareto frontier of the accuracy/latency trade-off (models which achieve the highest score within their end-to-end inference time).
At minimum this will include the model with the highest score and the model with the lowest inference time.
This is useful when deciding which model to use during inference if inference time is a consideration.
Models filtered out by this process would never be optimal choices for a user that only cares about model inference time and score.
silent : bool, default = False
Should leaderboard DataFrame be printed?
Returns
-------
:class:`pd.DataFrame` of model performance summary information.
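Examples
--------
A minimal usage sketch, assuming a fitted `predictor` and a hypothetical held-out 'test.csv' that contains the label column:
>>> lb = predictor.leaderboard('test.csv', silent=True)  # adds 'score_test' and test inference-time columns
>>> lb_extra = predictor.leaderboard(extra_info=True, silent=True)  # validation-only leaderboard with advanced info columns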
"""
self._assert_is_fit('leaderboard')
data = self.__get_dataset(data) if data is not None else data
return self._learner.leaderboard(X=data, extra_info=extra_info, extra_metrics=extra_metrics,
only_pareto_frontier=only_pareto_frontier, silent=silent)
def fit_summary(self, verbosity=3, show_plot=False):
"""
Output summary of information about models produced during `fit()`.
May create various generated summary plots and store them in folder: `predictor.path`.
Parameters
----------
verbosity : int, default = 3
Controls how detailed of a summary to output.
Set <= 0 for no output printing, 1 to print just high-level summary,
2 to print summary and create plots, >= 3 to print all information produced during `fit()`.
show_plot : bool, default = False
If True, shows the model summary plot in browser when verbosity > 1.
Returns
-------
Dict containing various detailed information. We do not recommend directly printing this dict as it may be very large.
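Examples
--------
A minimal usage sketch, assuming a fitted `predictor`:
>>> results = predictor.fit_summary(verbosity=1)  # print only the high-level summary
>>> results['model_best']  # name of the best model on validation data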
"""
self._assert_is_fit('fit_summary')
# hpo_used = len(self._trainer.hpo_results) > 0
hpo_used = False # Disabled until a more memory efficient hpo_results object is implemented.
model_types = self._trainer.get_models_attribute_dict(attribute='type')
model_inner_types = self._trainer.get_models_attribute_dict(attribute='type_inner')
model_typenames = {key: model_types[key].__name__ for key in model_types}
model_innertypenames = {key: model_inner_types[key].__name__ for key in model_types if key in model_inner_types}
MODEL_STR = 'Model'
ENSEMBLE_STR = 'Ensemble'
for model in model_typenames:
if (model in model_innertypenames) and (ENSEMBLE_STR not in model_innertypenames[model]) and (
ENSEMBLE_STR in model_typenames[model]):
new_model_typename = model_typenames[model] + "_" + model_innertypenames[model]
if new_model_typename.endswith(MODEL_STR):
new_model_typename = new_model_typename[:-len(MODEL_STR)]
model_typenames[model] = new_model_typename
unique_model_types = set(model_typenames.values()) # no more class info
# all fit() information that is returned:
results = {
'model_types': model_typenames, # dict with key = model-name, value = type of model (class-name)
'model_performance': self._trainer.get_models_attribute_dict('val_score'),
# dict with key = model-name, value = validation performance
'model_best': self._trainer.model_best, # the name of the best model (on validation data)
'model_paths': self._trainer.get_models_attribute_dict('path'),
# dict with key = model-name, value = path to model file
'model_fit_times': self._trainer.get_models_attribute_dict('fit_time'),
'model_pred_times': self._trainer.get_models_attribute_dict('predict_time'),
'num_bag_folds': self._trainer.k_fold,
'max_stack_level': self._trainer.get_max_level(),
}
if self.problem_type == QUANTILE:
results['num_quantiles'] = len(self.quantile_levels)
elif self.problem_type != REGRESSION:
results['num_classes'] = self._trainer.num_classes
# if hpo_used:
# results['hpo_results'] = self._trainer.hpo_results
# get dict mapping model name to final hyperparameter values for each model:
model_hyperparams = {}
for model_name in self._trainer.get_model_names():
model_obj = self._trainer.load_model(model_name)
model_hyperparams[model_name] = model_obj.params
results['model_hyperparams'] = model_hyperparams
if verbosity > 0: # print stuff
print("*** Summary of fit() ***")
print("Estimated performance of each model:")
results['leaderboard'] = self._learner.leaderboard(silent=False)
# self._summarize('model_performance', 'Validation performance of individual models', results)
# self._summarize('model_best', 'Best model (based on validation performance)', results)
# self._summarize('hyperparameter_tune', 'Hyperparameter-tuning used', results)
print("Number of models trained: %s" % len(results['model_performance']))
print("Types of models trained:")
print(unique_model_types)
num_fold_str = ""
bagging_used = results['num_bag_folds'] > 0
if bagging_used:
num_fold_str = f" (with {results['num_bag_folds']} folds)"
print("Bagging used: %s %s" % (bagging_used, num_fold_str))
num_stack_str = ""
stacking_used = results['max_stack_level'] > 2
if stacking_used:
num_stack_str = f" (with {results['max_stack_level']} levels)"
print("Multi-layer stack-ensembling used: %s %s" % (stacking_used, num_stack_str))
hpo_str = ""
# if hpo_used and verbosity <= 2:
# hpo_str = " (call fit_summary() with verbosity >= 3 to see detailed HPO info)"
# print("Hyperparameter-tuning used: %s %s" % (hpo_used, hpo_str))
# TODO: uncomment once feature_prune is functional: self._summarize('feature_prune', 'feature-selection used', results)
print("Feature Metadata (Processed):")
print("(raw dtype, special dtypes):")
print(self.feature_metadata)
if verbosity > 1: # create plots
plot_tabular_models(results, output_directory=self.path,
save_file="SummaryOfModels.html",
plot_title="Models produced during fit()",
show_plot=show_plot)
if hpo_used:
for model_type in results['hpo_results']:
if 'trial_info' in results['hpo_results'][model_type]:
plot_summary_of_models(
results['hpo_results'][model_type],
output_directory=self.path, save_file=model_type + "_HPOmodelsummary.html",
plot_title=f"Models produced during {model_type} HPO", show_plot=show_plot)
plot_performance_vs_trials(
results['hpo_results'][model_type],
output_directory=self.path, save_file=model_type + "_HPOperformanceVStrials.png",
plot_title=f"HPO trials for {model_type} models", show_plot=show_plot)
if verbosity > 2: # print detailed information
if hpo_used:
hpo_results = results['hpo_results']
print("*** Details of Hyperparameter optimization ***")
for model_type in hpo_results:
hpo_model = hpo_results[model_type]
if 'trial_info' in hpo_model:
print(
f"HPO for {model_type} model: Num. configurations tried = {len(hpo_model['trial_info'])}, Time spent = {hpo_model['total_time']}s, Search strategy = {hpo_model['search_strategy']}")
print(
f"Best hyperparameter-configuration (validation-performance: {self.eval_metric} = {hpo_model['validation_performance']}):")
print(hpo_model['best_config'])
"""
if bagging_used:
pass # TODO: print detailed bagging info
if stacking_used:
pass # TODO: print detailed stacking info, like how much it improves validation performance
if results['feature_prune']:
pass # TODO: print detailed feature-selection info once feature-selection is functional.
"""
if verbosity > 0:
print("*** End of fit() summary ***")
return results
def transform_features(self, data=None, model=None, base_models=None, return_original_features=True):
"""
Transforms data features through the AutoGluon feature generator.
This is useful to gain an understanding of how AutoGluon interprets the data features.
The output of this function can be used to train further models, even outside of AutoGluon.
This can be useful for training your own models on the same data representation as AutoGluon.
Individual AutoGluon models like the neural network may apply additional feature transformations that are not reflected in this method.
This method only applies universal transforms employed by all AutoGluon models.
When `data=None`, `base_models=[{best_model}]`, and bagging was enabled during fit():
This returns the out-of-fold predictions of the best model, which can be used as training input to a custom user stacker model.
Parameters
----------
data : str or :class:`TabularDataset` or :class:`pd.DataFrame` (optional)
The data to apply feature transformation to.
This data does not require the label column.
If str is passed, `data` will be loaded using the str value as the file path.
If not specified, the original data used during fit() will be used if fit() was previously called with `cache_data=True`. Otherwise, an exception will be raised.
For non-bagged mode predictors:
The data used when not specified is the validation set.
This can either be an automatically generated validation set or the user-defined `tuning_data` if passed during fit().
If all parameters are unspecified, then the output is equivalent to `predictor.load_data_internal(data='val', return_X=True, return_y=False)[0]`.
To get the label values of the output, call `predictor.load_data_internal(data='val', return_X=False, return_y=True)[1]`.
If the original training set is desired, it can be passed in through `data`.
Warning: Do not pass the original training set if `model` or `base_models` are set. This will result in overfit feature transformation.
For bagged mode predictors:
The data used when not specified is the full training set.
If all parameters are unspecified, then the output is equivalent to `predictor.load_data_internal(data='train', return_X=True, return_y=False)[0]`.
To get the label values of the output, call `predictor.load_data_internal(data='train', return_X=False, return_y=True)[1]`.
`base_model` features generated in this instance will be from out-of-fold predictions.
Note that the training set may differ from the training set originally passed during fit(), as AutoGluon may choose to drop or duplicate rows during training.
Warning: Do not pass the original training set through `data` if `model` or `base_models` are set. This will result in overfit feature transformation. Instead set `data=None`.
model : str, default = None
Model to generate input features for.
The output data will be equivalent to the input data that would be sent into `model.predict_proba(data)`.
Note: This only applies to cases where `data` is not the training data.
If `None`, then only return generically preprocessed features prior to any model fitting.
Valid models are listed in this `predictor` by calling `predictor.get_model_names()`.
Specifying a `refit_full` model will cause an exception if `data=None`.
`base_models=None` is a requirement when specifying `model`.
base_models : list, default = None
List of model names to use as base_models for a hypothetical stacker model when generating input features.
If `None`, then only return generically preprocessed features prior to any model fitting.
Valid models are listed in this `predictor` by calling `predictor.get_model_names()`.
If a stacker model S exists with `base_models=M`, then setting `base_models=M` is equivalent to setting `model=S`.
`model=None` is a requirement when specifying `base_models`.
return_original_features : bool, default = True
Whether to return the original features.
If False, only returns the additional output columns from specifying `model` or `base_models`.
This is useful to set to False if the intent is to use the output as input to further stacker models without the original features.
Returns
-------
:class:`pd.DataFrame` of the provided `data` after feature transformation has been applied.
This output does not include the label column, and will remove it if present in the supplied `data`.
If a transformed label column is desired, use `predictor.transform_labels`.
Examples
--------
>>> from autogluon.tabular import TabularPredictor
>>> predictor = TabularPredictor(label='class').fit('train.csv', auto_stack=True)  # predictor is in bagged mode.
>>> model = 'WeightedEnsemble_L2'
>>> train_data_transformed = predictor.transform_features(model=model)  # Internal training DataFrame used as input to `model.fit()` for each model trained in `predictor.fit()`
>>> test_data_transformed = predictor.transform_features('test.csv', model=model)  # Internal test DataFrame used as input to `model.predict_proba()` during `predictor.predict_proba(test_data, model=model)`
"""
self._assert_is_fit('transform_features')
data = self.__get_dataset(data) if data is not None else data
return self._learner.get_inputs_to_stacker(dataset=data, model=model, base_models=base_models,
use_orig_features=return_original_features)
def transform_labels(self, labels, inverse=False, proba=False):
"""
Transforms data labels to the internal label representation.
This can be useful for training your own models on the same data label representation as AutoGluon.
Regression problems do not differ between original and internal representation, and thus this method will return the provided labels.
Warning: When `inverse=False`, it is possible for the output to contain NaN label values in multiclass problems if the provided label was dropped during training.
Parameters
----------
labels : :class:`np.ndarray` or :class:`pd.Series`
Labels to transform.
If `proba=False`, an example input would be the output of `predictor.predict(test_data)`.
If `proba=True`, an example input would be the output of `predictor.predict_proba(test_data, as_multiclass=False)`.
inverse : boolean, default = False
When `True`, the input labels are treated as being in the internal representation and the original representation is outputted.
proba : boolean, default = False
When `True`, the input labels are treated as probabilities and the output will be the internal representation of probabilities.
In this case, it is expected that `labels` be a :class:`pd.DataFrame` or :class:`np.ndarray`.
If the `problem_type` is multiclass:
The input column order must be equal to `predictor.class_labels`.
The output column order will be equal to `predictor.class_labels_internal`.
if `inverse=True`, the same logic applies, but with input and output columns interchanged.
When `False`, the input labels are treated as actual labels and the output will be the internal representation of the labels.
In this case, it is expected that `labels` be a :class:`pd.Series` or :class:`np.ndarray`.
Returns
-------
:class:`pd.Series` of labels if `proba=False` or :class:`pd.DataFrame` of label probabilities if `proba=True`.
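Examples
--------
A minimal usage sketch, assuming a fitted classification `predictor` and a test DataFrame `test_data`:
>>> y_pred = predictor.predict(test_data)
>>> y_internal = predictor.transform_labels(y_pred)  # original labels -> internal representation
>>> y_original = predictor.transform_labels(y_internal, inverse=True)  # back to the original representation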
"""
self._assert_is_fit('transform_labels')
if inverse:
if proba:
labels_transformed = self._learner.label_cleaner.inverse_transform_proba(y=labels, as_pandas=True)
else:
labels_transformed = self._learner.label_cleaner.inverse_transform(y=labels)
else:
if proba:
labels_transformed = self._learner.label_cleaner.transform_proba(y=labels, as_pandas=True)
else:
labels_transformed = self._learner.label_cleaner.transform(y=labels)
return labels_transformed
def feature_importance(self, data=None, model=None, features=None, feature_stage='original', subsample_size=1000,
time_limit=None, num_shuffle_sets=None, include_confidence_band=True, confidence_level=0.99,
silent=False):
"""
Calculates feature importance scores for the given model via permutation importance. Refer to https://explained.ai/rf-importance/ for an explanation of permutation importance.
A feature's importance score represents the performance drop that results when the model makes predictions on a perturbed copy of the data where this feature's values have been randomly shuffled across rows.
A feature score of 0.01 would indicate that the predictive performance dropped by 0.01 when the feature was randomly shuffled.
The higher the score a feature has, the more important it is to the model's performance.
If a feature has a negative score, this means that the feature is likely harmful to the final model, and a model trained with the feature removed would be expected to achieve a better predictive performance.
Note that calculating feature importance can be a very computationally expensive process, particularly if the model uses hundreds or thousands of features. In many cases, this can take longer than the original model training.
To estimate how long `feature_importance(model, data, features)` will take, it is roughly the time taken by `predict_proba(data, model)` multiplied by the number of features.
Note: For highly accurate importance and p_value estimates, it is recommended to set `subsample_size` to at least 5,000 if possible and `num_shuffle_sets` to at least 10.
Parameters
----------
data : str or :class:`TabularDataset` or :class:`pd.DataFrame` (optional)
This data must also contain the label-column with the same column-name as specified during `fit()`.
If specified, then the data is used to calculate the feature importance scores.
If str is passed, `data` will be loaded using the str value as the file path.
If not specified, the original data used during `fit()` will be used if `cache_data=True`. Otherwise, an exception will be raised.
Do not pass the training data through this argument, as the feature importance scores calculated will be biased due to overfitting.
More accurate feature importances will be obtained from new data that was held-out during `fit()`.
model : str, default = None
Model to get feature importances for, if None the best model is chosen.
Valid models are listed in this `predictor` by calling `predictor.get_model_names()`
features : list, default = None
List of str feature names that feature importances are calculated for and returned, specify None to get all feature importances.
If you only want to compute feature importances for some of the features, you can pass their names in as a list of str.
Valid feature names change depending on the `feature_stage`.
To get the list of feature names for `feature_stage='original'`, call `predictor.feature_metadata_in.get_features()`.
To get the list of feature names for `feature_stage='transformed'`, call `list(predictor.transform_features().columns)`.
To get the list of feature names for `feature_stage='transformed_model'`, call `list(predictor.transform_features(model={model_name}).columns)`.
[Advanced] Can also contain tuples as elements of (feature_name, feature_list) form.
feature_name can be any string so long as it is unique with all other feature names / features in the list.
feature_list can be any list of valid features in the data.
This will compute importance of the combination of features in feature_list, naming the set of features in the returned DataFrame feature_name.
This importance will differ from adding the individual importances of each feature in feature_list, and will be more accurate to the overall group importance.
Example: ['featA', 'featB', 'featC', ('featBC', ['featB', 'featC'])]
In this example, the importance of 'featBC' will be calculated by jointly permuting 'featB' and 'featC' together as if they were a single two-dimensional feature.
feature_stage : str, default = 'original'
What stage of feature-processing should importances be computed for.
Options:
'original':
Compute importances of the original features.
Warning: `data` must be specified with this option, otherwise an exception will be raised.
'transformed':
Compute importances of the post-internal-transformation features (after automated feature engineering). These features may be missing some original features, or add new features entirely.
An example of new features would be ngram features generated from a text column.
Warning: For bagged models, feature importance calculation is not yet supported with this option when `data=None`. Doing so will raise an exception.
'transformed_model':
Compute importances of the post-model-transformation features. These features are the internal features used by the requested model. They may differ greatly from the original features.
If the model is a stack ensemble, this will include stack ensemble features such as the prediction probability features of the stack ensemble's base (ancestor) models.
subsample_size : int, default = 1000
The number of rows to sample from `data` when computing feature importance.
If `subsample_size=None` or `data` contains fewer than `subsample_size` rows, all rows will be used during computation.
Larger values increase the accuracy of the feature importance scores.
Runtime linearly scales with `subsample_size`.
time_limit : float, default = None
Time in seconds to limit the calculation of feature importance.
If None, feature importance will be calculated without early stopping.
A minimum of 1 full shuffle set will always be evaluated. If evaluating a single shuffle set takes longer than `time_limit`, the method will not return until that shuffle set has finished, regardless of the `time_limit`.
num_shuffle_sets : int, default = None
The number of different permutation shuffles of the data that are evaluated.
Larger values will increase the quality of the importance evaluation.
It is generally recommended to increase `subsample_size` before increasing `num_shuffle_sets`.
Defaults to 3 if `time_limit` is None or 10 if `time_limit` is specified.
Runtime linearly scales with `num_shuffle_sets`.
include_confidence_band: bool, default = True
If True, returned DataFrame will include two additional columns specifying confidence interval for the true underlying importance value of each feature.
Increasing `subsample_size` and `num_shuffle_sets` will tighten the confidence interval.
confidence_level: float, default = 0.99
This argument is only considered when `include_confidence_band` is True, and can be used to specify the confidence level used for constructing confidence intervals.
For example, if `confidence_level` is set to 0.99, then the returned DataFrame will include columns 'p99_high' and 'p99_low' which indicates that the true feature importance will be between 'p99_high' and 'p99_low' 99% of the time (99% confidence interval).
More generally, if `confidence_level` = 0.XX, then the columns containing the XX% confidence interval will be named 'pXX_high' and 'pXX_low'.
silent : bool, default = False
Whether to suppress logging output.
Returns
-------
:class:`pd.DataFrame` of feature importance scores with 6 columns:
index: The feature name.
'importance': The estimated feature importance score.
'stddev': The standard deviation of the feature importance score. If NaN, then not enough num_shuffle_sets were used to calculate a variance.
'p_value': P-value for a statistical t-test of the null hypothesis: importance = 0, vs the (one-sided) alternative: importance > 0.
Features with low p-value appear confidently useful to the predictor, while the other features may be useless to the predictor (or even harmful to include in its training data).
A p-value of 0.01 indicates that there is a 1% chance that the feature is useless or harmful, and a 99% chance that the feature is useful.
A p-value of 0.99 indicates that there is a 99% chance that the feature is useless or harmful, and a 1% chance that the feature is useful.
'n': The number of shuffles performed to estimate importance score (corresponds to sample-size used to determine confidence interval for true score).
'pXX_high': Upper end of XX% confidence interval for true feature importance score (where XX=99 by default).
'pXX_low': Lower end of XX% confidence interval for true feature importance score.
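Examples
--------
A minimal usage sketch, assuming a fitted `predictor` and a hypothetical held-out 'test.csv' that contains the label column:
>>> fi = predictor.feature_importance('test.csv', subsample_size=5000, num_shuffle_sets=10)
>>> fi.sort_values('importance', ascending=False).head()  # highest-importance features first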
"""
self._assert_is_fit('feature_importance')
data = self.__get_dataset(data) if data is not None else data
if (data is None) and (not self._trainer.is_data_saved):
raise AssertionError(
'No data was provided and there is no cached data to load for feature importance calculation. `cache_data=True` must be set in the `TabularPredictor` init `learner_kwargs` argument call to enable this functionality when data is not specified.')
if data is not None:
# Avoid crash when indices are duplicated
data = data.reset_index(drop=True)
if num_shuffle_sets is None:
num_shuffle_sets = 10 if time_limit else 3
fi_df = self._learner.get_feature_importance(model=model, X=data, features=features,
feature_stage=feature_stage,
subsample_size=subsample_size, time_limit=time_limit,
num_shuffle_sets=num_shuffle_sets, silent=silent)
if include_confidence_band:
if confidence_level <= 0.5 or confidence_level >= 1.0:
raise ValueError("confidence_level must lie between 0.5 and 1.0")
ci_str = "{:0.0f}".format(confidence_level * 100)
import scipy.stats
num_features = len(fi_df)
ci_low_dict = dict()
ci_high_dict = dict()
for i in range(num_features):
fi = fi_df.iloc[i]
mean = fi['importance']
stddev = fi['stddev']
n = fi['n']
if np.isnan(stddev) or np.isnan(n) or np.isnan(mean) or n == 1:  # `== np.nan` is always False; use np.isnan to detect missing values
ci_high = np.nan
ci_low = np.nan
else:
t_val = scipy.stats.t.ppf(1 - (1 - confidence_level) / 2, n - 1)
ci_high = mean + t_val * stddev / math.sqrt(n)
ci_low = mean - t_val * stddev / math.sqrt(n)
ci_high_dict[fi.name] = ci_high
ci_low_dict[fi.name] = ci_low
high_str = 'p' + ci_str + '_high'
low_str = 'p' + ci_str + '_low'
fi_df[high_str] = pd.Series(ci_high_dict)
fi_df[low_str] = pd.Series(ci_low_dict)
return fi_df
def persist_models(self, models='best', with_ancestors=True, max_memory=0.1) -> list:
"""
Persist models in memory for reduced inference latency. This is particularly important if the models are being used for online-inference where low latency is critical.
If models are not persisted in memory, they are loaded from disk every time they are asked to make predictions.
Parameters
----------
models : list of str or str, default = 'best'
Model names of models to persist.
If 'best' then the model with the highest validation score is persisted (this is the model used for prediction by default).
If 'all' then all models are persisted.
Valid models are listed in this `predictor` by calling `predictor.get_model_names()`.
with_ancestors : bool, default = True
If True, all ancestor models of the provided models will also be persisted.
If False, stacker models will not have the models they depend on persisted unless those models were specified in `models`. This will slow down inference as the ancestor models will still need to be loaded from disk for each predict call.
Only relevant for stacker models.
max_memory : float, default = 0.1
Proportion of total available memory to allow for the persisted models to use.
If the models' summed memory usage requires a larger proportion of memory than max_memory, they are not persisted. In this case, the output will be an empty list.
If None, then models are persisted regardless of estimated memory usage. This can cause out-of-memory errors.
Returns
-------
List of persisted model names.
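Examples
--------
A minimal usage sketch for online inference, assuming a fitted `predictor` and a test DataFrame `test_data`:
>>> persisted = predictor.persist_models('best')  # keep the best model (and its ancestors) in memory
>>> y_pred = predictor.predict(test_data)  # subsequent predict calls avoid loading models from disk
>>> predictor.unpersist_models()  # release the memory once predictions are no longer needed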
"""
self._assert_is_fit('persist_models')
return self._learner.persist_trainer(low_memory=False, models=models, with_ancestors=with_ancestors,
max_memory=max_memory)
def unpersist_models(self, models='all') -> list:
"""
Unpersist models in memory for reduced memory usage.
If models are not persisted in memory, they are loaded from disk every time they are asked to make predictions.
Note: Another way to reset the predictor and unpersist models is to reload the predictor from disk via `predictor = TabularPredictor.load(predictor.path)`.
Parameters
----------
models : list of str or str, default = 'all'
Model names of models to unpersist.
If 'all' then all models are unpersisted.
Valid models are listed in this `predictor` by calling `predictor.get_model_names_persisted()`.
Returns
-------
List of unpersisted model names.
"""
self._assert_is_fit('unpersist_models')
return self._learner.load_trainer().unpersist_models(model_names=models)
def refit_full(self, model='all'):
"""
Retrain model on all of the data (training + validation).
For bagged models:
Optimizes a model's inference time by collapsing bagged ensembles into a single model fit on all of the training data.
This process will typically result in a slight accuracy reduction and a large inference speedup.
The inference speedup will generally be between 10-200x faster than the original bagged ensemble model.
The inference speedup factor is equivalent to (k * n), where k is the number of folds (`num_bag_folds`) and n is the number of finished repeats (`num_bag_sets`) in the bagged ensemble.
The runtime is generally 10% or less of the original fit runtime.
The runtime can be roughly estimated as 1 / (k * n) of the original fit runtime, with k and n defined above.
For non-bagged models:
Optimizes a model's accuracy by retraining on 100% of the data without using a validation set.
Will typically result in a slight accuracy increase and no change to inference time.
The runtime will be approximately equal to the original fit runtime.
This process does not alter the original models, but instead adds additional models.
If stacker models are refit by this process, they will use the refit_full versions of the ancestor models during inference.
Models produced by this process will not have validation scores, as they use all of the data for training.
Therefore, it is up to the user to determine if the models are of sufficient quality by including test data in `predictor.leaderboard(test_data)`.
If the user does not have additional test data, they should reference the original model's score for an estimate of the performance of the refit_full model.
Warning: Be aware that utilizing refit_full models without separately verifying on test data means that the model is untested, and has no guarantee of being consistent with the original model.
`cache_data` must have been set to `True` during the original training to enable this functionality.
Parameters
----------
model : str, default = 'all'
Model name of model to refit.
If 'all' then all models are refitted.
If 'best' then the model with the highest validation score is refit.
All ancestor models will also be refit in the case that the selected model is a weighted or stacker ensemble.
Valid models are listed in this `predictor` by calling `predictor.get_model_names()`.
Returns
-------
Dictionary of original model names -> refit_full model names.
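Examples
--------
A minimal usage sketch, assuming `cache_data=True` was used in the original `fit()` and `test_data` is a hypothetical held-out DataFrame with labels:
>>> refit_map = predictor.refit_full('best')  # dict of original model name -> refit_full model name
>>> predictor.leaderboard(test_data)  # verify refit_full model quality on held-out data before relying on it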
"""
self._assert_is_fit('refit_full')
refit_full_dict = self._learner.refit_ensemble_full(model=model)
return refit_full_dict
def get_model_best(self):
"""
Returns the string model name of the best model by validation score.
This is typically the same model used during inference when `predictor.predict` is called without specifying a model.
Returns
-------
String model name of the best model
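Examples
--------
A minimal usage sketch, assuming a fitted `predictor` and a test DataFrame `test_data`:
>>> best_model = predictor.get_model_best()
>>> y_pred = predictor.predict(test_data, model=best_model)  # equivalent to predictor.predict(test_data)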
"""
self._assert_is_fit('get_model_best')
return self._trainer.get_model_best(can_infer=True)
def get_model_full_dict(self):
"""
Returns a dictionary of original model name -> refit full model name.
Empty unless `refit_full=True` was set during fit or `predictor.refit_full()` was called.
This can be useful when determining the best model based off of `predictor.leaderboard()`, then getting the _FULL version of the model by passing its name as the key to this dictionary.
Returns
-------
Dictionary of original model name -> refit full model name.
"""
self._assert_is_fit('get_model_full_dict')
return copy.deepcopy(self._trainer.model_full_dict)
def info(self):
"""
[EXPERIMENTAL] Returns a dictionary of `predictor` metadata.
Warning: This functionality is currently in preview mode.
The metadata information returned may change in structure in future versions without warning.
The definitions of various metadata values are not yet documented.
The output of this function should not be used for programmatic decisions.
Contains information such as row count, column count, model training time, validation scores, hyperparameters, and much more.
Returns
-------
Dictionary of `predictor` metadata.
"""
self._assert_is_fit('info')
return self._learner.get_info(include_model_info=True)
# TODO: Add data argument
# TODO: Add option to disable OOF generation of newly fitted models
# TODO: Move code logic to learner/trainer
# TODO: Add fit() arg to perform this automatically at end of training
# TODO: Consider adding cutoff arguments such as top-k models
def fit_weighted_ensemble(self, base_models: list = None, name_suffix='Best', expand_pareto_frontier=False,
time_limit=None):
"""
Fits new weighted ensemble models to combine predictions of previously-trained models.
`cache_data` must have been set to `True` during the original training to enable this functionality.
Parameters
----------
base_models : list, default = None
List of model names the weighted ensemble can consider as candidates.
If None, all previously trained models are considered except for weighted ensemble models.
As an example, to train a weighted ensemble that can only have weights assigned to the models 'model_a' and 'model_b', set `base_models=['model_a', 'model_b']`
name_suffix : str, default = 'Best'
Name suffix to add to the name of the newly fitted ensemble model.
expand_pareto_frontier : bool, default = False
If True, will train N-1 weighted ensemble models instead of 1, where `N=len(base_models)`.
The final model trained when True is equivalent to the model trained when False.
These weighted ensemble models will attempt to expand the pareto frontier.
This will create many different weighted ensembles which have different accuracy/memory/inference-speed trade-offs.
This is particularly useful when inference speed is an important consideration.
time_limit : int, default = None
Time in seconds each weighted ensemble model is allowed to train for. If `expand_pareto_frontier=True`, the `time_limit` value is applied to each model.
If None, the ensemble models train without time restriction.
Returns
-------
List of newly trained weighted ensemble model names.
If an exception is encountered while training an ensemble model, that model's name will be absent from the list.
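Examples
--------
A minimal usage sketch, assuming `cache_data=True` was used in the original `fit()` and that 'model_a' and 'model_b' are names of previously trained models:
>>> new_models = predictor.fit_weighted_ensemble(base_models=['model_a', 'model_b'], name_suffix='AB')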
"""
self._assert_is_fit('fit_weighted_ensemble')
trainer = self._learner.load_trainer()
if trainer.bagged_mode:
X = trainer.load_X()
y = trainer.load_y()
fit = True
else:
X = trainer.load_X_val()
y = trainer.load_y_val()
fit = False
stack_name = 'aux1'
if base_models is None:
base_models = trainer.get_model_names(stack_name='core')
X_stack_preds = trainer.get_inputs_to_stacker(X=X, base_models=base_models, fit=fit, use_orig_features=False)
models = []
if expand_pareto_frontier:
leaderboard = self.leaderboard(silent=True)
leaderboard = leaderboard[leaderboard['model'].isin(base_models)]
leaderboard = leaderboard.sort_values(by='pred_time_val')
models_to_check = leaderboard['model'].tolist()
for i in range(1, len(models_to_check) - 1):
models_to_check_now = models_to_check[:i + 1]
max_base_model_level = max([trainer.get_model_level(base_model) for base_model in models_to_check_now])
weighted_ensemble_level = max_base_model_level + 1
models += trainer.generate_weighted_ensemble(X=X_stack_preds, y=y, level=weighted_ensemble_level,
stack_name=stack_name,
base_model_names=models_to_check_now,
name_suffix=name_suffix + '_Pareto' + str(i),
time_limit=time_limit)
max_base_model_level = max([trainer.get_model_level(base_model) for base_model in base_models])
weighted_ensemble_level = max_base_model_level + 1
models += trainer.generate_weighted_ensemble(X=X_stack_preds, y=y, level=weighted_ensemble_level,
stack_name=stack_name, base_model_names=base_models,
name_suffix=name_suffix, time_limit=time_limit)
return models
def get_oof_pred(self, model: str = None, transformed=False, train_data=None, internal_oof=False) -> pd.Series:
"""
Note: This is advanced functionality not intended for normal usage.
Returns the out-of-fold (OOF) predictions for every row in the training data.
For more information, refer to `get_oof_pred_proba()` documentation.
Parameters
----------
model : str (optional)
Refer to `get_oof_pred_proba()` documentation.
transformed : bool, default = False
Refer to `get_oof_pred_proba()` documentation.
train_data : pd.DataFrame, default = None
Refer to `get_oof_pred_proba()` documentation.
internal_oof : bool, default = False
Refer to `get_oof_pred_proba()` documentation.
Returns
-------
:class:`pd.Series` object of the out-of-fold training predictions of the model.
"""
self._assert_is_fit('get_oof_pred')
y_pred_proba_oof = self.get_oof_pred_proba(model=model,
transformed=transformed,
as_multiclass=True,
train_data=train_data,
internal_oof=internal_oof)
return get_pred_from_proba_df(y_pred_proba_oof, problem_type=self.problem_type)
# TODO: Improve error messages when trying to get oof from refit_full and distilled models.
# TODO: v0.1 add tutorial related to this method, as it is very powerful.
# TODO: Remove train_data argument once we start caching the raw original data: Can just load that instead.
def get_oof_pred_proba(self, model: str = None, transformed=False, as_multiclass=True, train_data=None,
internal_oof=False) -> Union[pd.DataFrame, pd.Series]:
"""
Note: This is advanced functionality not intended for normal usage.
Returns the out-of-fold (OOF) predicted class probabilities for every row in the training data.
OOF prediction probabilities may provide unbiased estimates of generalization accuracy (reflecting how predictions will behave on new data)
Predictions for each row are only made using models that were fit to a subset of data where this row was held-out.
Warning: This method will raise an exception if called on a model that is not a bagged ensemble. Only bagged models (such as stacker models) can produce OOF predictions.
This also means that refit_full models and distilled models will raise an exception.
Warning: If intending to join the output of this method with the original training data, be aware that a rare edge-case issue exists:
Multiclass problems with rare classes combined with the use of the 'log_loss' eval_metric may have forced AutoGluon to duplicate rows in the training data to satisfy minimum class counts in the data.
If this has occurred, then the indices and row counts of the returned :class:`pd.Series` in this method may not align with the training data.
In this case, consider fetching the processed training data using `predictor.load_data_internal()` instead of using the original training data.
A more benign version of this issue occurs when 'log_loss' wasn't specified as the eval_metric but rare classes were dropped by AutoGluon.
In this case, not all of the original training data rows will have an OOF prediction. It is recommended to either drop these rows during the join or to get direct predictions on the missing rows via :meth:`TabularPredictor.predict_proba`.
Parameters
----------
model : str (optional)
The name of the model to get out-of-fold predictions from. Defaults to None, which uses the highest scoring model on the validation set.
Valid models are listed in this `predictor` by calling `predictor.get_model_names()`
transformed : bool, default = False
Whether the output values should be of the original label representation (False) or the internal label representation (True).
The internal representation for binary and multiclass classification are integers numbering the k possible classes from 0 to k-1, while the original representation is identical to the label classes provided during fit.
Generally, most users will want the original representation and keep `transformed=False`.
as_multiclass : bool, default = True
Whether to return binary classification probabilities as if they were for multiclass classification.
Output will contain two columns, and if `transformed=False`, the column names will correspond to the binary class labels.
The columns will be the same order as `predictor.class_labels`.
If False, output will contain only 1 column for the positive class (get positive_class name via `predictor.positive_class`).
Only impacts output for binary classification problems.
train_data : pd.DataFrame, default = None
Specify the original `train_data` to ensure that any training rows that were originally dropped internally are properly handled.
If None, then output will not contain all rows if training rows were dropped internally during fit.
internal_oof : bool, default = False
[Advanced Option] Return the internal OOF preds rather than the externally facing OOF preds.
Internal OOF preds may have more/fewer rows than was provided in train_data, and are incompatible with external data.
If you don't know what this does, keep it as False.
Returns
-------
:class:`pd.Series` or :class:`pd.DataFrame` object of the out-of-fold training prediction probabilities of the model.
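Examples
--------
A minimal usage sketch, assuming a `predictor` fit in bagged mode (e.g. via `num_bag_folds` or `auto_stack=True`) on a DataFrame `train_data`:
>>> oof_pred_proba = predictor.get_oof_pred_proba(train_data=train_data)
>>> oof_pred = predictor.get_oof_pred(train_data=train_data)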
"""
self._assert_is_fit('get_oof_pred_proba')
if model is None:
model = self.get_model_best()
if not self._trainer.bagged_mode:
raise AssertionError('Predictor must be in bagged mode to get out-of-fold predictions.')
if model in self._trainer._model_full_dict_val_score:
# FIXME: This is a hack, add refit tag in a nicer way than via the _model_full_dict_val_score
# TODO: bagged-with-holdout refit to bagged-no-holdout should still be able to return out-of-fold predictions
raise AssertionError('_FULL models do not have out-of-fold predictions.')
if self._trainer.get_model_attribute_full(model=model, attribute='val_in_fit', func=max):
raise AssertionError(
f'Model {model} does not have out-of-fold predictions because it used a validation set during training.')
y_pred_proba_oof_transformed = self.transform_features(base_models=[model], return_original_features=False)
if not internal_oof:
is_duplicate_index = y_pred_proba_oof_transformed.index.duplicated(keep='first')
if is_duplicate_index.any():
logger.log(20,
'Detected duplicate indices... This means that data rows may have been duplicated during training. '
'Removing all duplicates except for the first instance.')
y_pred_proba_oof_transformed = y_pred_proba_oof_transformed[~is_duplicate_index]
if self._learner._pre_X_rows is not None and len(y_pred_proba_oof_transformed) < self._learner._pre_X_rows:
len_diff = self._learner._pre_X_rows - len(y_pred_proba_oof_transformed)
if train_data is None:
logger.warning(f'WARNING: {len_diff} rows of training data were dropped internally during fit. '
f'The output will not contain all original training rows.\n'
f'If attempting to get `oof_pred_proba`, DO NOT pass `train_data` into `predictor.predict_proba` or `predictor.transform_features`!\n'
f'Instead this can be done by the following '
f'(Ensure `train_data` is identical to when it was used in fit):\n'
f'oof_pred_proba = predictor.get_oof_pred_proba(train_data=train_data)\n'
f'oof_pred = predictor.get_oof_pred(train_data=train_data)\n')
else:
missing_idx = list(train_data.index.difference(y_pred_proba_oof_transformed.index))
if len(missing_idx) > 0:
missing_idx_data = train_data.loc[missing_idx]
missing_pred_proba = self.transform_features(data=missing_idx_data, base_models=[model],
return_original_features=False)
y_pred_proba_oof_transformed = pd.concat([y_pred_proba_oof_transformed, missing_pred_proba])
y_pred_proba_oof_transformed = y_pred_proba_oof_transformed.reindex(list(train_data.index))
if self.problem_type == MULTICLASS and self._learner.label_cleaner.problem_type_transform == MULTICLASS:
y_pred_proba_oof_transformed.columns = copy.deepcopy(
self._learner.label_cleaner.ordered_class_labels_transformed)
elif self.problem_type == QUANTILE:
y_pred_proba_oof_transformed.columns = self.quantile_levels
else:
y_pred_proba_oof_transformed.columns = [self.label]
y_pred_proba_oof_transformed = y_pred_proba_oof_transformed[self.label]
if as_multiclass and self.problem_type == BINARY:
y_pred_proba_oof_transformed = LabelCleanerMulticlassToBinary.convert_binary_proba_to_multiclass_proba(
y_pred_proba_oof_transformed, as_pandas=True)
elif self.problem_type == MULTICLASS:
if transformed:
y_pred_proba_oof_transformed = LabelCleanerMulticlassToBinary.convert_binary_proba_to_multiclass_proba(
y_pred_proba_oof_transformed, as_pandas=True)
y_pred_proba_oof_transformed.columns = copy.deepcopy(
self._learner.label_cleaner.ordered_class_labels_transformed)
if transformed:
return y_pred_proba_oof_transformed
else:
return self.transform_labels(labels=y_pred_proba_oof_transformed, inverse=True, proba=True)
@property
def positive_class(self):
"""
Returns the positive class name in binary classification. Useful for computing metrics such as F1 which require a positive and negative class.
In binary classification, :class:`TabularPredictor.predict_proba(as_multiclass=False)` returns the estimated probability that each row belongs to the positive class.
Will print a warning and return None if called when `predictor.problem_type != 'binary'`.
Returns
-------
The positive class name in binary classification or None if the problem is not binary classification.
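Examples
--------
Illustrative sketch (assumes a fitted binary classification predictor and a `test_data` DataFrame):
>>> positive_class = predictor.positive_class
>>> pred_proba_positive = predictor.predict_proba(test_data, as_multiclass=False)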
"""
return self._learner.positive_class
def load_data_internal(self, data='train', return_X=True, return_y=True):
"""
Loads the internal data representation used during model training.
Individual AutoGluon models like the neural network may apply additional feature transformations that are not reflected in this method.
This method only applies universal transforms employed by all AutoGluon models.
Warning, the internal representation may:
Have different features compared to the original data.
Have different row counts compared to the original data.
Have indices which do not align with the original data.
Have label values which differ from those in the original data.
Internal data representations should NOT be combined with the original data, in most cases this is not possible.
Parameters
----------
data : str, default = 'train'
The data to load.
Valid values are:
'train':
Load the training data used during model training.
This is a transformed and augmented version of the `train_data` passed in `fit()`.
'val':
Load the validation data used during model training.
This is a transformed and augmented version of the `tuning_data` passed in `fit()`.
If `tuning_data=None` was set in `fit()`, then `tuning_data` is an automatically generated validation set created by splitting `train_data`.
Warning: Will raise an exception if called by a bagged predictor, as bagged predictors have no validation data.
return_X : bool, default = True
Whether to return the internal data features
If set to `False`, then the first element in the returned tuple will be None.
return_y : bool, default = True
Whether to return the internal data labels
If set to `False`, then the second element in the returned tuple will be None.
Returns
-------
Tuple of (:class:`pd.DataFrame`, :class:`pd.Series`) corresponding to the internal data features and internal data labels, respectively.
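Examples
--------
Illustrative sketch (assumes a fitted non-bagged predictor, since bagged predictors have no validation data):
>>> X_train, y_train = predictor.load_data_internal(data='train')
>>> X_val, y_val = predictor.load_data_internal(data='val')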
"""
self._assert_is_fit('load_data_internal')
if data == 'train':
load_X = self._trainer.load_X
load_y = self._trainer.load_y
elif data == 'val':
load_X = self._trainer.load_X_val
load_y = self._trainer.load_y_val
else:
raise ValueError(f'data must be one of: [\'train\', \'val\'], but was \'{data}\'.')
X = load_X() if return_X else None
y = load_y() if return_y else None
return X, y
def save_space(self, remove_data=True, remove_fit_stack=True, requires_save=True, reduce_children=False):
"""
Reduces the memory and disk size of predictor by deleting auxiliary model files that aren't needed for prediction on new data.
This function has NO impact on inference accuracy.
It is recommended to invoke this method if the only goal is to use the trained model for prediction.
However, certain advanced functionality may no longer be available after `save_space()` has been called.
Parameters
----------
remove_data : bool, default = True
Whether to remove cached files of the original training and validation data.
This only reduces disk usage; it has no impact on memory usage.
This is especially useful when the original data was large.
This is equivalent to setting `cache_data=False` during the original `fit()`.
Will disable all advanced functionality that requires `cache_data=True`.
remove_fit_stack : bool, default = True
Whether to remove information required to fit new stacking models and continue fitting bagged models with new folds.
This only reduces disk usage; it has no impact on memory usage.
This includes:
out-of-fold (OOF) predictions
This is useful for multiclass problems with many classes, as OOF predictions can become very large on disk. (1 GB per model in extreme cases)
This disables `predictor.refit_full()` for stacker models.
requires_save : bool, default = True
Whether to remove information that requires the model to be saved again to disk.
Typically this only includes flag variables that don't have significant impact on memory or disk usage, but should technically be updated due to the removal of more important information.
An example is the `is_data_saved` boolean variable in `trainer`, which should be updated to `False` if `remove_data=True` was set.
reduce_children : bool, default = False
Whether to apply the reduction rules to bagged ensemble children models. These are the models trained for each fold of the bagged ensemble.
This should generally be kept as `False` since the most important memory and disk reduction techniques are automatically applied to these models during the original `fit()` call.
"""
self._assert_is_fit('save_space')
self._trainer.reduce_memory_size(remove_data=remove_data, remove_fit_stack=remove_fit_stack, remove_fit=True,
remove_info=False, requires_save=requires_save,
reduce_children=reduce_children)
def delete_models(self, models_to_keep=None, models_to_delete=None, allow_delete_cascade=False,
delete_from_disk=True, dry_run=True):
"""
Deletes models from `predictor`.
This can be helpful to minimize memory usage and disk usage, particularly for model deployment.
This will remove all references to the models in `predictor`.
For example, removed models will not appear in `predictor.leaderboard()`.
WARNING: If `delete_from_disk=True`, this will DELETE ALL FILES in the deleted model directories, regardless if they were created by AutoGluon or not.
DO NOT STORE FILES INSIDE OF THE MODEL DIRECTORY THAT ARE UNRELATED TO AUTOGLUON.
Parameters
----------
models_to_keep : str or list, default = None
Name of model or models to not delete.
All models that are not specified and are also not required as a dependency of any model in `models_to_keep` will be deleted.
Specify `models_to_keep='best'` to keep only the best model and its model dependencies.
`models_to_delete` must be None if `models_to_keep` is set.
To see the list of possible model names, use: `predictor.get_model_names()` or `predictor.leaderboard()`.
models_to_delete : str or list, default = None
Name of model or models to delete.
All models that are not specified but depend on a model in `models_to_delete` will also be deleted.
`models_to_keep` must be None if `models_to_delete` is set.
allow_delete_cascade : bool, default = False
If `False`, an exception will be raised (and no deletion will occur) if any unspecified models depend on a model in `models_to_delete`.
An example of a dependent model is m1 if m2 is a stacker model and takes predictions from m1 as inputs. In this case, m1 would be a dependent model of m2.
If `True`, all dependent models of models in `models_to_delete` will be deleted.
Has no effect if `models_to_delete=None`.
delete_from_disk : bool, default = True
If `True`, deletes the models from disk if they were persisted.
WARNING: This deletes the entire directory for the deleted models, and ALL FILES located there.
It is highly recommended to first run with `dry_run=True` to understand which directories will be deleted.
dry_run : bool, default = True
If `True`, then deletions don't occur, and logging statements are printed describing what would have occurred.
Set `dry_run=False` to perform the deletions.
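Examples
--------
Illustrative sketch of the dry-run-first workflow (assumes a fitted predictor):
>>> predictor.delete_models(models_to_keep='best', dry_run=True)   # log what would be deleted
>>> predictor.delete_models(models_to_keep='best', dry_run=False)  # perform the deletion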
"""
self._assert_is_fit('delete_models')
if models_to_keep == 'best':
models_to_keep = self._trainer.model_best
if models_to_keep is None:
models_to_keep = self._trainer.get_model_best()
self._trainer.delete_models(models_to_keep=models_to_keep, models_to_delete=models_to_delete,
allow_delete_cascade=allow_delete_cascade, delete_from_disk=delete_from_disk,
dry_run=dry_run)
# TODO: v0.1 add documentation for arguments
def get_model_names(self, stack_name=None, level=None, can_infer: bool = None, models: list = None) -> list:
"""Returns the list of model names trained in this `predictor` object."""
self._assert_is_fit('get_model_names')
return self._trainer.get_model_names(stack_name=stack_name, level=level, can_infer=can_infer, models=models)
def get_model_names_persisted(self) -> list:
"""Returns the list of model names which are persisted in memory."""
self._assert_is_fit('get_model_names_persisted')
return list(self._learner.load_trainer().models.keys())
def distill(self, train_data=None, tuning_data=None, augmentation_data=None, time_limit=None, hyperparameters=None,
holdout_frac=None,
teacher_preds='soft', augment_method='spunge', augment_args={'size_factor': 5, 'max_size': int(1e5)},
models_name_suffix=None, verbosity=None):
"""
Distill AutoGluon's most accurate ensemble-predictor into single models which are simpler/faster and require less memory/compute.
Distillation can produce a model that is more accurate than the same model fit directly on the original training data.
After calling `distill()`, there will be more models available in this Predictor, which can be evaluated using `predictor.leaderboard(test_data)` and deployed with: `predictor.predict(test_data, model=MODEL_NAME)`.
This will raise an exception if `cache_data=False` was previously set in `fit()`.
NOTE: Until catboost v0.24 is released, `distill()` with CatBoost students in multiclass classification requires you to first install catboost-dev: `pip install catboost-dev`
Parameters
----------
train_data : str or :class:`TabularDataset` or :class:`pd.DataFrame`, default = None
Same as `train_data` argument of `fit()`.
If None, the same training data will be loaded from `fit()` call used to produce this Predictor.
tuning_data : str or :class:`TabularDataset` or :class:`pd.DataFrame`, default = None
Same as `tuning_data` argument of `fit()`.
If `tuning_data = None` and `train_data = None`: the same training/validation splits will be loaded from `fit()` call used to produce this Predictor,
unless bagging/stacking was previously used in which case a new training/validation split is performed.
augmentation_data : :class:`TabularDataset` or :class:`pd.DataFrame`, default = None
An optional extra dataset of unlabeled rows that can be used for augmenting the dataset used to fit student models during distillation (ignored if None).
time_limit : int, default = None
Approximately how long (in seconds) the distillation process should run for.
If None, no time-constraint will be enforced allowing the distilled models to fully train.
hyperparameters : dict or str, default = None
Specifies which models to use as students and what hyperparameter-values to use for them.
Same as `hyperparameters` argument of `fit()`.
If = None, then student models will use the same hyperparameters from `fit()` used to produce this Predictor.
Note: distillation is currently only supported for ['GBM','NN','RF','CAT'] student models, other models and their hyperparameters are ignored here.
holdout_frac : float
Same as `holdout_frac` argument of :meth:`TabularPredictor.fit`.
teacher_preds : str, default = 'soft'
What form of teacher predictions to distill from (teacher refers to the most accurate AutoGluon ensemble-predictor).
If None, we only train with original labels (no data augmentation).
If 'hard', labels are hard teacher predictions given by: `teacher.predict()`
If 'soft', labels are soft teacher predictions given by: `teacher.predict_proba()`
Note: 'hard' and 'soft' are equivalent for regression problems.
If `augment_method` is not None, teacher predictions are only used to label augmented data (training data keeps original labels).
To apply label-smoothing: `teacher_preds='onehot'` will use original training data labels converted to one-hot vectors for multiclass problems (no data augmentation).
augment_method : str, default='spunge'
Specifies method to use for generating augmented data for distilling student models.
Options include:
None : no data augmentation performed.
'munge' : The MUNGE algorithm (https://www.cs.cornell.edu/~caruana/compression.kdd06.pdf).
'spunge' : A simpler, more efficient variant of the MUNGE algorithm.
augment_args : dict, default = {'size_factor':5, 'max_size': int(1e5)}
Contains the following kwargs that control the chosen `augment_method` (these are ignored if `augment_method=None`):
'num_augmented_samples': int, number of augmented datapoints used during distillation. Overrides 'size_factor', 'max_size' if specified.
'max_size': float, the maximum number of augmented datapoints to add (ignored if 'num_augmented_samples' specified).
'size_factor': float, if n = training data sample-size, we add int(n * size_factor) augmented datapoints, up to 'max_size'.
Larger values in `augment_args` will slow down the runtime of distill(), and may produce worse results if the provided time_limit is too small.
You can also pass in kwargs for the `spunge_augment`, `munge_augment` functions in `autogluon.tabular.augmentation.distill_utils`.
models_name_suffix : str, default = None
Optional suffix that can be appended at the end of all distilled student models' names.
Note: all distilled models will contain '_DSTL' substring in their name by default.
verbosity : int, default = None
Controls amount of printed output during distillation (4 = highest, 0 = lowest).
Same as `verbosity` parameter of :class:`TabularPredictor`.
If None, the same `verbosity` used in previous fit is employed again.
Returns
-------
List of names (str) corresponding to the distilled models.
Examples
--------
>>> from autogluon.tabular import TabularDataset, TabularPredictor
>>> train_data = TabularDataset('train.csv')
>>> predictor = TabularPredictor(label='class').fit(train_data, auto_stack=True)
>>> distilled_model_names = predictor.distill()
>>> test_data = TabularDataset('test.csv')
>>> ldr = predictor.leaderboard(test_data)
>>> model_to_deploy = distilled_model_names[0]
>>> predictor.predict(test_data, model=model_to_deploy)
"""
self._assert_is_fit('distill')
if isinstance(hyperparameters, str):
hyperparameters = get_hyperparameter_config(hyperparameters)
return self._learner.distill(X=train_data, X_val=tuning_data, time_limit=time_limit,
hyperparameters=hyperparameters, holdout_frac=holdout_frac,
verbosity=verbosity, models_name_suffix=models_name_suffix,
teacher_preds=teacher_preds,
augmentation_data=augmentation_data, augment_method=augment_method,
augment_args=augment_args)
def plot_ensemble_model(self, prune_unused_nodes=True) -> str:
"""
Output the visualized stack ensemble architecture of a model trained by `fit()`.
The plot is stored to a file, `ensemble_model.png` in folder `predictor.path`
This function requires `graphviz` and `pygraphviz` to be installed because the visualization depends on those packages.
Otherwise, this function raises an `ImportError` and cannot generate the visualization of the ensemble model.
To install the required package, run the below commands (for Ubuntu linux):
$ sudo apt-get install graphviz
$ pip install graphviz
For other platforms, refer to https://graphviz.org/ for Graphviz install, and https://pygraphviz.github.io/documentation.html for PyGraphviz.
Parameters
----------
prune_unused_nodes : bool, default = True
If True, models that are not connected to any other model in the ensemble graph are removed from the plot.
Returns
-------
The file name with the full path to the saved graphic
"""
self._assert_is_fit('plot_ensemble_model')
try:
import pygraphviz
except:
raise ImportError('Visualizing ensemble network architecture requires pygraphviz library')
G = self._trainer.model_graph.copy()
if prune_unused_nodes:
nodes_without_outedge = [node for node, degree in dict(G.degree()).items() if degree < 1]
else:
nodes_without_outedge = []
nodes_no_val_score = [node for node in G if G.nodes[node]['val_score'] is None]
G.remove_nodes_from(nodes_without_outedge)
G.remove_nodes_from(nodes_no_val_score)
root_node = [n for n, d in G.out_degree() if d == 0]
best_model_node = self.get_model_best()
A = nx.nx_agraph.to_agraph(G)
A.graph_attr.update(rankdir='BT')
A.node_attr.update(fontsize=10)
A.node_attr.update(shape='rectangle')
for node in A.iternodes():
node.attr['label'] = f"{node.name}\nVal score: {float(node.attr['val_score']):.4f}"
if node.name == best_model_node:
node.attr['style'] = 'filled'
node.attr['fillcolor'] = '#ff9900'
node.attr['shape'] = 'box3d'
elif nx.has_path(G, node.name, best_model_node):
node.attr['style'] = 'filled'
node.attr['fillcolor'] = '#ffcc00'
model_image_fname = os.path.join(self.path, 'ensemble_model.png')
A.draw(model_image_fname, format='png', prog='dot')
return model_image_fname
@staticmethod
def _summarize(key, msg, results):
if key in results:
print(msg + ": " + str(results[key]))
@staticmethod
def __get_dataset(data):
if isinstance(data, TabularDataset):
return data
elif isinstance(data, pd.DataFrame):
return TabularDataset(data)
elif isinstance(data, str):
return TabularDataset(data)
elif isinstance(data, pd.Series):
raise TypeError("data must be TabularDataset or pandas.DataFrame, not pandas.Series. \
To predict on just single example (ith row of table), use data.iloc[[i]] rather than data.iloc[i]")
else:
raise TypeError("data must be TabularDataset or pandas.DataFrame or str file path to data")
def _validate_hyperparameter_tune_kwargs(self, hyperparameter_tune_kwargs, time_limit=None):
"""
Returns True if hyperparameter_tune_kwargs is None or can construct a valid scheduler.
Returns False if hyperparameter_tune_kwargs results in an invalid scheduler.
"""
if hyperparameter_tune_kwargs is None:
return True
scheduler_cls, scheduler_params = scheduler_factory(hyperparameter_tune_kwargs=hyperparameter_tune_kwargs,
time_out=time_limit,
nthreads_per_trial='auto', ngpus_per_trial='auto')
assert scheduler_params[
'searcher'] != 'bayesopt_hyperband', "searcher == 'bayesopt_hyperband' not yet supported"
if scheduler_params.get('dist_ip_addrs', None):
logger.warning(
'Warning: dist_ip_addrs does not currently work for Tabular. Distributed instances will not be utilized.')
if scheduler_params['num_trials'] == 1:
logger.warning(
'Warning: Specified num_trials == 1 for hyperparameter tuning, disabling HPO. This can occur if time_limit was not specified in `fit()`.')
return False
scheduler_ngpus = scheduler_params['resource'].get('num_gpus', 0)
if scheduler_ngpus is not None and isinstance(scheduler_ngpus, int) and scheduler_ngpus > 1:
logger.warning(
f"Warning: TabularPredictor currently doesn't use >1 GPU per training run. Detected {scheduler_ngpus} GPUs.")
return True
def _set_hyperparameter_tune_kwargs_in_ag_args(self, hyperparameter_tune_kwargs, ag_args, time_limit):
if hyperparameter_tune_kwargs is not None:
if 'hyperparameter_tune_kwargs' in ag_args:
raise AssertionError(
'hyperparameter_tune_kwargs was specified in both ag_args and in kwargs. Please only specify once.')
else:
ag_args['hyperparameter_tune_kwargs'] = hyperparameter_tune_kwargs
if not self._validate_hyperparameter_tune_kwargs(ag_args.get('hyperparameter_tune_kwargs', None), time_limit):
ag_args.pop('hyperparameter_tune_kwargs', None)
if ag_args.get('hyperparameter_tune_kwargs', None) is not None:
logger.log(30,
'Warning: hyperparameter tuning is currently experimental and may cause the process to hang.')
return ag_args
def _set_post_fit_vars(self, learner: AbstractLearner = None):
if learner is not None:
self._learner: AbstractLearner = learner
self._learner_type = type(self._learner)
if self._learner.trainer_path is not None:
self._learner.persist_trainer(low_memory=True)
self._trainer: AbstractTrainer = self._learner.load_trainer() # Trainer object
@classmethod
def _load_version_file(cls, path) -> str:
version_file_path = path + cls._predictor_version_file_name
version = load_str.load(path=version_file_path)
return version
def _save_version_file(self, silent=False):
from ..version import __version__
version_file_contents = f'{__version__}'
version_file_path = self.path + self._predictor_version_file_name
save_str.save(path=version_file_path, data=version_file_contents, verbose=not silent)
def save(self, silent=False):
"""
Save this Predictor to file in directory specified by this Predictor's `path`.
Note that :meth:`TabularPredictor.fit` already saves the predictor object automatically
(we do not recommend modifying the Predictor object yourself as it tracks many trained models).
Parameters
----------
silent : bool, default = False
Whether to save without logging a message.
"""
path = self.path
tmp_learner = self._learner
tmp_trainer = self._trainer
self._learner.save()
self._learner = None
self._trainer = None
save_pkl.save(path=path + self.predictor_file_name, object=self)
self._learner = tmp_learner
self._trainer = tmp_trainer
self._save_version_file(silent=silent)
if not silent:
logger.log(20, f'TabularPredictor saved. To load, use: predictor = TabularPredictor.load("{self.path}")')
@classmethod
def _load(cls, path: str):
"""
Inner load method, called in `load`.
"""
predictor: TabularPredictor = load_pkl.load(path=path + cls.predictor_file_name)
learner = predictor._learner_type.load(path)
predictor._set_post_fit_vars(learner=learner)
return predictor
@classmethod
def load(cls, path: str, verbosity: int = None, require_version_match: bool = True):
"""
Load a TabularPredictor object previously produced by `fit()` from file and return this object. It is highly recommended that the predictor be loaded with the exact AutoGluon version it was fit with.
Parameters
----------
path : str
The path to directory in which this Predictor was previously saved.
verbosity : int, default = None
Sets the verbosity level of this Predictor after it is loaded.
Valid values range from 0 (least verbose) to 4 (most verbose).
If None, logging verbosity is not changed from existing values.
Specify larger values to see more information printed when using Predictor during inference, smaller values to see less information.
Refer to TabularPredictor init for more information.
require_version_match : bool, default = True
If True, will raise an AssertionError if the `autogluon.tabular` version of the loaded predictor does not match the installed version of `autogluon.tabular`.
If False, will allow loading of models trained on incompatible versions, but is NOT recommended. Users may run into numerous issues if attempting this.
"""
if verbosity is not None:
set_logger_verbosity(verbosity) # Reset logging after load (may be in new Python session)
if path is None:
raise ValueError("path cannot be None in load()")
try:
from ..version import __version__
version_load = __version__
except:
version_load = None
path = setup_outputdir(path, warn_if_exist=False) # replace ~ with absolute path if it exists
try:
version_init = cls._load_version_file(path=path)
except:
logger.warning(f'WARNING: Could not find version file at "{path + cls._predictor_version_file_name}".\n'
f'This means that the predictor was fit in a version `<=0.3.1`.')
version_init = None
if version_init is None:
predictor = cls._load(path=path)
try:
version_init = predictor._learner.version
except:
version_init = None
else:
predictor = None
if version_init is None:
version_init = 'Unknown (Likely <=0.0.11)'
if version_load != version_init:
logger.warning('')
logger.warning('############################## WARNING ##############################')
logger.warning('WARNING: AutoGluon version differs from the version used to create the predictor! '
'This may lead to instability and it is highly recommended the predictor be loaded '
'with the exact AutoGluon version it was created with.')
logger.warning(f'\tPredictor Version: {version_init}')
logger.warning(f'\tCurrent Version: {version_load}')
logger.warning('############################## WARNING ##############################')
logger.warning('')
if require_version_match:
raise AssertionError(
f'Predictor was created on version {version_init} but is being loaded with version {version_load}. '
f'Please ensure the versions match to avoid instability. While it is NOT recommended, '
f'this error can be bypassed by specifying `require_version_match=False`.')
if predictor is None:
predictor = cls._load(path=path)
return predictor
@staticmethod
def _validate_init_kwargs(kwargs):
valid_kwargs = {
'learner_type',
'learner_kwargs',
'quantile_levels',
}
invalid_keys = []
for key in kwargs:
if key not in valid_kwargs:
invalid_keys.append(key)
if invalid_keys:
raise ValueError(f'Invalid kwargs passed: {invalid_keys}\nValid kwargs: {list(valid_kwargs)}')
def _validate_fit_kwargs(self, kwargs):
# TODO:
# Valid core_kwargs values:
# ag_args, ag_args_fit, ag_args_ensemble, stack_name, ensemble_type, name_suffix, time_limit
# Valid aux_kwargs values:
# name_suffix, time_limit, stack_name, aux_hyperparameters, ag_args, ag_args_ensemble
# TODO: Remove features from models option for fit_extra
# TODO: Constructor?
fit_kwargs_default = dict(
# data split / ensemble architecture kwargs -> Don't nest but have nested documentation -> Actually do nesting
holdout_frac=None, # TODO: Potentially error if num_bag_folds is also specified
num_bag_folds=None,
# TODO: Potentially move to fit_extra, raise exception if value too large / invalid in fit_extra.
auto_stack=False,
use_bag_holdout=False,
# other
feature_generator='auto',
unlabeled_data=None,
_feature_generator_kwargs=None,
)
kwargs = self._validate_fit_extra_kwargs(kwargs, extra_valid_keys=list(fit_kwargs_default.keys()))
kwargs_sanitized = fit_kwargs_default.copy()
kwargs_sanitized.update(kwargs)
return kwargs_sanitized
def _fit_extra_kwargs_dict(self):
"""
Returns:
--------
dict of fit_extra args:
verbosity: Which levels of logger should be printed
pseudo_data: pseudo labeled data to be incorporated into train
but not used in validation
name_suffix: A suffix string to be added to the individual model names
"""
return dict(
# data split / ensemble architecture kwargs -> Don't nest but have nested documentation -> Actually do nesting
num_bag_sets=None,
num_stack_levels=None,
hyperparameter_tune_kwargs=None,
# core_kwargs -> +1 nest
ag_args=None,
ag_args_fit=None,
ag_args_ensemble=None,
excluded_model_types=None,
# aux_kwargs -> +1 nest
# post_fit_kwargs -> +1 nest
set_best_to_refit_full=False,
keep_only_best=False,
save_space=False,
refit_full=False,
# other
verbosity=self.verbosity,
feature_prune_kwargs=None,
# private
_save_bag_folds=None,
# quantile levels
quantile_levels=None,
calibrate=False,
# pseudo label
pseudo_data=None,
name_suffix=None
)
def _validate_fit_extra_kwargs(self, kwargs, extra_valid_keys=None):
fit_extra_kwargs_default = self._fit_extra_kwargs_dict()
allowed_kwarg_names = list(fit_extra_kwargs_default.keys())
if extra_valid_keys is not None:
allowed_kwarg_names += extra_valid_keys
for kwarg_name in kwargs.keys():
if kwarg_name not in allowed_kwarg_names:
public_kwarg_options = [kwarg for kwarg in allowed_kwarg_names if kwarg[0] != '_']
public_kwarg_options.sort()
raise ValueError(
f"Unknown keyword argument specified: {kwarg_name}\nValid kwargs: {public_kwarg_options}")
kwargs_sanitized = fit_extra_kwargs_default.copy()
kwargs_sanitized.update(kwargs)
# Deepcopy args to avoid altering outer context
deepcopy_args = ['ag_args', 'ag_args_fit', 'ag_args_ensemble', 'excluded_model_types']
for deepcopy_arg in deepcopy_args:
kwargs_sanitized[deepcopy_arg] = copy.deepcopy(kwargs_sanitized[deepcopy_arg])
refit_full = kwargs_sanitized['refit_full']
set_best_to_refit_full = kwargs_sanitized['set_best_to_refit_full']
if refit_full and not self._learner.cache_data:
raise ValueError(
'`refit_full=True` is only available when `cache_data=True`. Set `cache_data=True` to utilize `refit_full`.')
if set_best_to_refit_full and not refit_full:
raise ValueError(
'`set_best_to_refit_full=True` is only available when `refit_full=True`. Set `refit_full=True` to utilize `set_best_to_refit_full`.')
return kwargs_sanitized
def _prune_data_features(self, train_features: pd.DataFrame, other_features: pd.DataFrame, is_labeled: bool):
"""
Removes certain columns from the provided datasets that do not contain predictive features.
Parameters
----------
train_features : pd.DataFrame
The features/columns for the incoming training data
other_features : pd.DataFrame
Features of other auxiliary data that contains the same covariates as the training data.
Examples of this could be: tuning data, pseudo data
is_labeled: bool
Is other_features dataframe labeled or not
"""
if self.sample_weight is not None:
if self.sample_weight in train_features:
train_features.remove(self.sample_weight)
if self.sample_weight in other_features:
other_features.remove(self.sample_weight)
if self._learner.groups is not None and is_labeled:
train_features.remove(self._learner.groups)
return train_features, other_features
def _validate_fit_data(self, train_data, tuning_data=None, unlabeled_data=None):
if isinstance(train_data, str):
train_data = TabularDataset(train_data)
if tuning_data is not None and isinstance(tuning_data, str):
tuning_data = TabularDataset(tuning_data)
if unlabeled_data is not None and isinstance(unlabeled_data, str):
unlabeled_data = TabularDataset(unlabeled_data)
if not isinstance(train_data, pd.DataFrame):
raise AssertionError(
f'train_data is required to be a pandas DataFrame, but was instead: {type(train_data)}')
if len(set(train_data.columns)) < len(train_data.columns):
raise ValueError(
"Column names are not unique, please change duplicated column names (in pandas: train_data.rename(columns={'current_name':'new_name'})")
self._validate_unique_indices(data=train_data, name='train_data')
if tuning_data is not None:
if not isinstance(tuning_data, pd.DataFrame):
raise AssertionError(
f'tuning_data is required to be a pandas DataFrame, but was instead: {type(tuning_data)}')
self._validate_unique_indices(data=tuning_data, name='tuning_data')
train_features = [column for column in train_data.columns if column != self.label]
tuning_features = [column for column in tuning_data.columns if column != self.label]
train_features, tuning_features = self._prune_data_features(train_features=train_features,
other_features=tuning_features,
is_labeled=True)
train_features = np.array(train_features)
tuning_features = np.array(tuning_features)
if np.any(train_features != tuning_features):
raise ValueError("Column names must match between training and tuning data")
if unlabeled_data is not None:
if not isinstance(unlabeled_data, pd.DataFrame):
raise AssertionError(
f'unlabeled_data is required to be a pandas DataFrame, but was instead: {type(unlabeled_data)}')
self._validate_unique_indices(data=unlabeled_data, name='unlabeled_data')
train_features = [column for column in train_data.columns if column != self.label]
unlabeled_features = [column for column in unlabeled_data.columns]
train_features, unlabeled_features = self._prune_data_features(train_features=train_features,
other_features=unlabeled_features,
is_labeled=False)
train_features = sorted(np.array(train_features))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 26 11:00:07 2020
@author: <NAME>
"""
import matplotlib.pyplot as plt
from scipy.spatial import distance
from scipy import signal
import numpy as np
# Constants
DEFAULT_NEURONUM = 500
DEFAULT_TEND = 7000
DEFAULT_IDRIVE = 3
DEFAULT_XNUME = 20
DEFAULT_YNUME = 20
DEFAULT_XNUMI = 10
DEFAULT_YNUMI = 10
DEFAULT_DEGREE_EE = 40
DEFAULT_DEGREE_EI = 10
DEFAULT_DEGREE_IE = 400
DEFAULT_DEGREE_II = 100
DEFAULT_WEIGHT_EE = 0.01
DEFAULT_WEIGHT_EI = 0.05
DEFAULT_WEIGHT_IE = 0.04
DEFAULT_WEIGHT_II = 0.04
DEFAULT_TAU_SYN = 3
DEFAULT_GKS_MIN = 0.2
DEFAULT_GKS_MAX = 1.5
# Class
class NeuroNet():
def __init__(self,
neuroNum = DEFAULT_NEURONUM,
tEnd = DEFAULT_TEND,
Idrive = DEFAULT_IDRIVE,
tauSyn = DEFAULT_TAU_SYN,
gKsMin = DEFAULT_GKS_MIN,
gKsMax = DEFAULT_GKS_MAX):
'''
Parameters
----------
neuroNum : int, optional
Number of neurons in the network. The default is DEFAULT_NEURONUM.
tEnd : float, optional
Simulation end time in ms. The default is DEFAULT_TEND.
Idrive : float, optional
Constant driving current applied to each neuron. The default is DEFAULT_IDRIVE.
tauSyn : float, optional
Synaptic decay time constant in ms. The default is DEFAULT_TAU_SYN.
gKsMin : float, optional
Minimum slow potassium conductance gKs. The default is DEFAULT_GKS_MIN.
gKsMax : float, optional
Maximum slow potassium conductance gKs. The default is DEFAULT_GKS_MAX.
Returns
-------
None.
'''
# simulation properties
self.tEnd = tEnd # ms
self.tStep = 0.05 # ms
self.tPoints = np.arange(0,self.tEnd,self.tStep)
# ensemble properties
self.neuroNum = neuroNum
self.Idrive = Idrive*np.ones(shape=(self.neuroNum,1))
# neuronal properties
self.gKsMin = gKsMin
self.gKsMax = gKsMax
self.randomInitialStates()
self.gKs = self.gKsMax
# initial adjMat
self.adjMat = np.zeros(shape=(self.neuroNum,self.neuroNum))
self.Esyn = np.zeros((self.neuroNum,1))
# 0 mV for excitatory synapses;
# -75mV for inhibitory synapses
self.tauSyn = DEFAULT_TAU_SYN*np.ones((self.neuroNum,1)) # ms
def randomInitialStates(self):
self.states = np.random.rand(self.neuroNum,4)
self.states[:,3] = -70 + 40 * self.states[:,3]
return self
def zerolikeInitialStates(self,logV=False):
originalDC = self.Idrive.copy()
originalT = self.tEnd
self.Idrive[:] = -1
self.tEnd = 500
self.tPoints = np.arange(0,self.tEnd,self.tStep) - self.tEnd
self.runSimulation(isNet = False,logV=logV)
if logV: self.tPoints_before = self.tPoints.copy()
self.Idrive = originalDC
self.tEnd = originalT
self.tPoints = np.arange(0,self.tEnd,self.tStep)
return self
def mexicanHat(self,
xNumE = DEFAULT_XNUME,
yNumE = DEFAULT_YNUME,
xNumI = DEFAULT_XNUMI,
yNumI = DEFAULT_YNUMI,
degreeEE = DEFAULT_DEGREE_EE,
degreeEI = DEFAULT_DEGREE_EI,
degreeIE = DEFAULT_DEGREE_IE,
degreeII = DEFAULT_DEGREE_II,
weightEE = DEFAULT_WEIGHT_EE,
weightEI = DEFAULT_WEIGHT_EI,
weightIE = DEFAULT_WEIGHT_IE,
weightII = DEFAULT_WEIGHT_II):
'''
Parameters
----------
xNumE : int, optional
Number of excitatory cells along x. The default is DEFAULT_XNUME.
yNumE : int, optional
Number of excitatory cells along y. The default is DEFAULT_YNUME.
xNumI : int, optional
Number of inhibitory cells along x. The default is DEFAULT_XNUMI.
yNumI : int, optional
Number of inhibitory cells along y. The default is DEFAULT_YNUMI.
degreeEE : int, optional
Number of E->E connections per excitatory cell. The default is DEFAULT_DEGREE_EE.
degreeEI : int, optional
Number of E->I connections per excitatory cell. The default is DEFAULT_DEGREE_EI.
degreeIE : int, optional
Number of I->E connections per inhibitory cell. The default is DEFAULT_DEGREE_IE.
degreeII : int, optional
Number of I->I connections per inhibitory cell. The default is DEFAULT_DEGREE_II.
weightEE : float, optional
Synaptic weight of E->E connections. The default is DEFAULT_WEIGHT_EE.
weightEI : float, optional
Synaptic weight of E->I connections. The default is DEFAULT_WEIGHT_EI.
weightIE : float, optional
Synaptic weight of I->E connections. The default is DEFAULT_WEIGHT_IE.
weightII : float, optional
Synaptic weight of I->I connections. The default is DEFAULT_WEIGHT_II.
Returns
-------
None.
'''
self.numE = xNumE * yNumE
self.xNumE,self.yNumE = xNumE,yNumE
self.numI = self.neuroNum - self.numE
self.xNumI,self.yNumI = xNumI,yNumI
if self.numI != xNumI * yNumI:
print('ERROR: xNumI * yNumI must equal the total number of inhibitory cells')
self.Esyn[-self.numI:,:] = -75 # mV
# assign x, y coordinates
xLocE = np.arange(xNumE) + 0.5 # + 0.5 for periodic condition
yLocE = np.arange(yNumE) + 0.5
xLocE,yLocE = np.meshgrid(xLocE,yLocE)
self.coordsE = np.stack((xLocE.reshape(-1),yLocE.reshape(-1))).T
xLocI = (np.arange(xNumI) + 0.5) * (xNumE / xNumI)
yLocI = (np.arange(yNumI) + 0.5) * (yNumE / yNumI)
xLocI,yLocI = np.meshgrid(xLocI,yLocI)
self.coordsI = np.stack((xLocI.reshape(-1),yLocI.reshape(-1))).T
# compute mexican-hat adjacency matrix
# compute distance matrices
distEE = distance.cdist(self.coordsE,self.coordsE,
lambda a,b: self.computeDist(a,b))
distEI = distance.cdist(self.coordsI,self.coordsE,
lambda a,b: self.computeDist(a,b))
self.distEE = distEE
self.distEI = distEI
# compute adjEE and adjEI
if degreeEE >= self.numE:
adjMatEE = weightEE * np.ones(shape = (self.numE,self.numE))
else:
adjMatEE = np.zeros(shape = (self.numE,self.numE))
adjMatEE[
np.argsort(distEE,axis = 0)[1:degreeEE+1,:].T.reshape(-1),
np.concatenate(
[i*np.ones(degreeEE,dtype=int) for i in np.arange(self.numE)])
] = weightEE
if degreeEI >= self.numI:
adjMatEI = weightEI * np.ones(shape = (self.numI,self.numE))
else:
adjMatEI = np.zeros(shape = (self.numI,self.numE))
adjMatEI[
np.argsort(distEI,axis = 0)[:degreeEI,:].T.reshape(-1),
np.concatenate(
[i*np.ones(degreeEI,dtype=int) for i in np.arange(self.numE)])
] = weightEI
# compute adjIE and adjII: all-to-all connection if degree >= # of cells
if degreeIE >= self.numE:
adjMatIE = weightIE * np.ones(shape = (self.numE,self.numI))
else:
distIE = distance.cdist(self.coordsE,self.coordsI,
lambda a,b: self.computeDist(a, b))
adjMatIE = np.zeros(shape = (self.numE,self.numI))
adjMatIE[
np.argsort(distIE,axis=0)[:degreeIE,:].T.reshape(-1),
np.concatenate(
[i*np.ones(degreeIE,dtype=int) for i in np.arange(self.numI)])
] = weightIE
if degreeII >= self.numI:
adjMatII = weightII * np.ones(shape = (self.numI,self.numI))
else:
distII = distance.cdist(self.coordsI,self.coordsI,
lambda a,b: self.computeDist(a,b))
adjMatII = np.zeros(shape = (self.numI,self.numI))
adjMatII[
np.argsort(distII,axis = 0)[1:degreeII+1,:].T.reshape(-1),
np.concatenate(
[i*np.ones(degreeII,dtype=int) for i in np.arange(self.numI)])
] = weightII
# finally get the adjMat
self.adjMat = np.vstack((np.hstack((adjMatEE,adjMatIE)),
np.hstack((adjMatEI,adjMatII))))
return self
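# Illustrative usage sketch (not part of the original file): with the default constants,
# the network has 400 excitatory cells on a 20x20 grid and 100 inhibitory cells on a
# 10x10 grid, so NeuroNet() and mexicanHat() can be called with no arguments:
#   net = NeuroNet()        # 500 neurons, default drive and gKs range
#   net.mexicanHat()        # build local E->E excitation plus broad I->E inhibition
#   net.zerolikeInitialStates()   # relax to a resting state before the main simulation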
# compute the euclidean distance with periodic boundary conditions
def computeDist(self,a,b):
bounds = np.array([self.xNumE, self.yNumE])
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
from numpy.fft import fft2, ifft2, fftshift, ifftshift
# %% read in the image and do transform
Q6_2 = np.array(Image.open("Q6_2.tif"))
row, col = Q6_2.shape
img_fourier = fftshift(fft2(Q6_2))
img_fourier_view = np.log(np.abs(img_fourier))
from worldoptim.environments.cost_functions.base_multi_cost_function import BaseMultiCostFunction
from worldoptim.environments.cost_functions.costs.death_rate_cost import DeathRate
from worldoptim.environments.cost_functions.costs.quality_of_life_cost import QoLCost
import numpy as np
class MultiCostDeathrateQOL(BaseMultiCostFunction):
# This function computes two independent costs:
# The death rate cost: exponential function of death rate, with minimum threshold
# The quality of life cost: - QoL
def __init__(self,
drn,
use_constraints=False,
beta_default=0.5,
):
"""
Multi-objective cost functions with two costs: death rate and QoL. It is controllable by
the mixing parameter beta.
Parameters
----------
use_constraints: bool
Whether to use constraints on the maximum values of cumulative rewards.
beta_default: float
Default mixing parameter.
"""
super().__init__(use_constraints=use_constraints)
self.beta_default = beta_default
self.beta = self.beta_default
# Initialize cost functions
self.death_rate_cost = DeathRate(id_cost=0, drn=drn)
self.qol_cost = QoLCost(id_cost=1)
self.costs = [self.death_rate_cost, self.qol_cost]
self.nb_costs = len(self.costs)
if self.use_constraints:
self.goal_dim = 3
self.constraints_ids = [[1], [2]] # ids of the constraints in the goal vector (0 is mixing param)
else:
self.goal_dim = 1
self.constraints_ids = []
def sample_goal_params(self):
"""
Sample goal parameters.
Returns
-------
goal: 1D nd.array
Made of three params in [0, 1]: beta is the mixing parameter,
the following are normalized constraints on the maximal values of cumulative costs.
"""
beta = np.random.rand()
import os
import math
import time
from time import strftime
import numpy as np
import numpy.linalg as LA
import scipy.io as sio
import pkg_resources
def repmat(A, rows, cols):
return np.tile(A, (cols, rows)).T
def vec(A):
# TODO: rewrite docstrings
"""
* Syntax: `a = vec(A)`
* Vectorization of a matrix. This function is a built-in function in some
recent MATLAB version.
"""
return A.flatten('F')  # column-major (Fortran-order) flattening, matching MATLAB's A(:)
def label_to_range(label):
"""
Convert label to range
Parameters:
-----------
label: list of integers
must be in the form of [1, 1, ..., 1, 2, 2, ..., 2, ..., C, C, ..., C]
i.e. nondecreasing numbers starting from 1, each element is greater
than the previous element by at most 1
Returns:
--------
a list of integers with C + 1 elements, starting with 0;
the i-th element (for i >= 1) is the cumulative count of elements in `label`
whose value is <= i, i.e. the index at which class i ends
"""
res = [0]
assert label[0] == 1, 'label must start with 1'
for i in range(1, len(label)):
if label[i] == label[i-1]:
continue
if label[i] == label[i-1] + 1:
res.append(i)
else:
assert False,\
('label[{}] and label[{}] must be equal or two consecutive '
'integers, got {} and {}').format(
i-1, i, label[i-1], label[i]
)
res.append(len(label))
return res
def range_to_label(a_range):
"""
From a range, convert it to label
This is an inverse function of label_to_range
Parameters:
-----------
a_range: list of integers
must start with 0 and is a strictly increasing list
Returns:
--------
a list of integers of length a_range[-1] in the form
[1, ..., 1, 2, ..., 2, ..., C, ..., C], where class i appears
(a_range[i] - a_range[i-1]) times
"""
assert a_range[0] == 0, 'input must start with 0'
res = []
for i in range(1, len(a_range)):
assert a_range[i] > a_range[i-1],\
('a_range must be an increasing list, '
'got a_range[{}] = {} < a_range[{}] = {}').format(
i, a_range[i], i - 1, a_range[i-1]
)
res.extend([i]*(a_range[i] - a_range[i-1]))
return res
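# Worked example (follows directly from the two definitions above):
#   label_to_range([1, 1, 2, 2, 2, 3]) -> [0, 2, 5, 6]
#   range_to_label([0, 2, 5, 6]) -> [1, 1, 2, 2, 2, 3]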
def get_block_row(matrix, block_indices, row_range):
"""
Extract a subset of rows from a matrix
Parameters:
-----------
matrix: 2-d numpy array
block matrix
block_indices: integer or list of integers
indices of extracted blocks, 0-indexed. If indices is a list, return
the concatenation of all blocks
row_range: list of integers
in the form of [0, c_1, c_1 + c_2, ..., c_1 + c_2 + ... + c_N]
where c_i is the number of rows in the i-th block
Returns:
--------
a 2-d matrix
"""
assert matrix.ndim == 2, 'Expect to receive 2-d array input, got shape {}'.format(matrix.shape)
if isinstance(block_indices, int):
block_indices = [block_indices]
# if isinstance(block_indices, (list, np.ndarray, np.generic))
ids = []
for i in block_indices:
ids = ids + list(range(row_range[i], row_range[i+1]))
return matrix[ids, :].copy()
def get_block_col(matrix, block_indices, col_range):
"""
Extract a subset of columns from a matrix
Parameters:
-----------
matrix: 2-d numpy array
block matrix
block_indices: integer or list of integers
indices of extracted blocks, 0-indexed. If indices is a list, return
the concatenation of all blocks
col_range: list of integers
in the form of [0, c_1, c_1 + c_2, ..., c_1 + c_2 + ... + c_N]
where c_i is the number of columns in the i-th block
Returns:
--------
a 2-d matrix
"""
assert matrix.ndim == 2, 'Expect to receive 2-d array input, got shape {}'.format(matrix.shape)
assert matrix.shape[1] == col_range[-1]
return get_block_row(matrix.T, block_indices, col_range).T
def get_block(matrix, i, j, row_range, col_range):
"""
Extract a submatrix of a matrix
Parameters:
-----------
matrix the big matrix:
matrix = [ M11, M12, ..., M1m;
M21, M22, ..., M2m;
... ;
Mn1, Mn2, ..., Mnm]
i: row block index
j: column block index
row_range: row range
col_range: columns range
"""
return matrix[row_range[i]:row_range[i+1],
col_range[j]: col_range[j+1]].copy()
def norm1(X):
"""
Return norm 1 of a matrix, which is sum of the absolute value of all elements
of that matrix.
"""
if X.shape[0]*X.shape[1] == 0:
return 0
return abs(X).sum()
def normF2(X):
"""
Return square of the Frobenius norm, which is sum of square of all
elements in a matrix
"""
if X.shape[0]*X.shape[1] == 0:
return 0
return LA.norm(X, 'fro')**2
def normc(A):
"""
normalize each column of A to have norm2 = 1
"""
return A / np.tile(np.sqrt(np.sum(A*A, axis=0)), (A.shape[0], 1))
def nuclearnorm(X):
"""
Return nuclear norm of a matrix.
"""
if X.size == 0:
return 0
return LA.norm(X) if X.ndim == 1 else LA.norm(X, 'nuc')
def shrinkage(U, alambda):
"""
Soft thresholding function.
Solve the following optimization problem:
X = arg min_X 0.5*||X - U||_F^2 + lambda||X||_1
where U and X are matrices with same sizes. lambda can be either a positive
scalar or a positive matrix (all elements are positive) with same size as X.
"""
return np.maximum(0, U - alambda) + np.minimum(0, U + alambda)
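# Worked example (elementwise soft thresholding with alambda = 1):
#   U = np.array([[1.5, -0.2], [-3.0, 0.4]])
#   shrinkage(U, 1) -> [[0.5, 0.0], [-2.0, 0.0]]
# entries with |u| <= 1 are set to 0; the remaining entries shrink toward 0 by 1.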
def shrinkage_rank(D, alambda):
"""
Singular value thresholding algorithm for matrix completion.
Solve the following optimization problem:
X = arg min_X 0.5*||X - D||_F^2 + lambda*||X||_*
where ||X||_* is the nuclear norm.
"""
U, s, V = LA.svd(D, full_matrices=False)
s1 = np.maximum(0, s - alambda)
return np.dot(U, np.dot(np.diag(s1), V))
class MyForm:
"""
Describe a special family of matrices:
A = [ M 0 ... 0;
0 M ... 0;
0 0 ... M] +
[ N N ... N;
N N ... N;
N N ... N]
with k block rows and columns
"""
def __init__(self, M, N, k):
self.M = M.copy()
self.N = N.copy()
self.k = k
def full_express(self):
return np.kron(np.eye(self.k), self.M) + np.tile(self.N, (self.k, self.k))
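# Illustrative example of the block structure: with M = [[1]], N = [[2]] and k = 3,
# full_express() returns
#   [[3, 2, 2],
#    [2, 3, 2],
#    [2, 2, 3]]
# i.e. M appears on the diagonal blocks and N is added to every block.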
def mult(self, other):
"""
Multiplication with other MyForm matrix
"""
A = np.dot(self.M, other.M)
B = np.dot(self.M, other.N) + np.dot(self.N, other.M) + \
self.k*np.dot(self.N, other.N)
return MyForm(A, B, self.k)
def inv(self):
"""
compute inverse matrix
"""
A = LA.inv(self.M)
import math
import numpy as np
import tensorflow as tf
import torch
from torch import nn
import torch.nn.functional as F
from tener.misc.pretty_print import print_info
def get_angles(pos, i, d_model):
angle_rates = 1 / np.power(10000, (2 * (i//2)) / np.float32(d_model))
return pos * angle_rates
def positional_encoding(position, d_model):
angle_rads = get_angles(np.arange(position)[:, np.newaxis],
np.arange(d_model)[np.newaxis, :],
d_model)
# apply sin to even indices in the array; 2i
angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
# apply cos to odd indices in the array; 2i+1
angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
pos_encoding = angle_rads[np.newaxis, ...]
return tf.cast(pos_encoding, dtype=tf.float32)
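# Example (shape sketch): positional_encoding(position=50, d_model=512) returns a
# tf.float32 tensor of shape (1, 50, 512); slicing [:, :seq_len, :] gives the encoding
# that can be added to a (batch, seq_len, 512) batch of token embeddings.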
def make_positions(tensor, padding_idx=0):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1. Padding symbols are ignored.
"""
# The series of casts and type-conversions here are carefully
# balanced to both work with ONNX export and XLA. In particular XLA
# prefers ints, cumsum defaults to output longs, and ONNX doesn't know
# how to handle the dtype kwarg in cumsum.
mask = tf.math.not_equal(tensor, padding_idx)
mask = tf.cast(mask, tf.int32)
return (
tf.math.cumsum(mask, axis=1) * mask
) + padding_idx
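# Worked example (with padding_idx = 0):
#   make_positions(tf.constant([[7, 9, 4, 0, 0]]), padding_idx=0) -> [[1, 2, 3, 0, 0]]
# non-padding tokens are numbered from padding_idx + 1; padding positions stay at padding_idx.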
class SinusoidalPositionalEmbeddingNaive(tf.keras.layers.Layer):
"""
PE_(pos, 2i) = sin(pos/10000^(2i/d_model)) # even position
PE_(pos, 2i+1) = cos(pos/10000^(2i/d_model)) # odd position
"""
def __init__(self, maximum_position_encoding, d_model):
# super(SinusoidalPositionalEmbedding, self).__init__()
super().__init__()
self.pos_encoding = positional_encoding(maximum_position_encoding, d_model)
def call(self, x):
"""
:param x: Tensor of soze [batch_size, max_seq_length]
:return:
"""
max_seq_len = tf.shape(x)[1]
pos = self.pos_encoding[:, :max_seq_len, :]
return pos
class SinusoidalPositionalEmbedding(tf.keras.layers.Layer):
"""
This module produces sinusoidal positional embeddings of any length.
Padding symbols are ignored.
"""
def __init__(self, embedding_dim, padding_idx, init_size=1568):
super().__init__()
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
self.embd_weights = SinusoidalPositionalEmbedding.get_embedding(
init_size,
embedding_dim,
padding_idx,
)
self.embd_weights = tf.convert_to_tensor(self.embd_weights)
@staticmethod
def get_embedding(num_embeddings, embedding_dim, padding_idx=None):
"""Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = np.exp(np.arange(half_dim, dtype=np.float32) * -emb)
emb = np.expand_dims(np.arange(num_embeddings, dtype=np.float32), 1) * np.expand_dims(emb, 0)
import numpy as np
# t04 is identical to t01 except for several factors.
def t04(parmod,ps,x,y,z):
"""
A data-based model of the external (i.e., without earth's contribution) part of the
magnetospheric magnetic field, calibrated by
(1) solar wind pressure pdyn (nanopascals),
(2) dst (nanotesla),
(3) byimf,
(4) bzimf (nanotesla)
(5-10) indices w1 - w6, calculated as time integrals from the beginning of a storm
see the reference (3) below, for a detailed definition of those variables
:param parmod: The elements are explained above.
:param x,y,z: GSM coordinates in Re (1 Re = 6371.2 km)
:return: bx,by,bz. Field components in GSM system, in nT.
Computed as a sum of contributions from principal field sources.
Assembled: March 25, 2004; Updated: August 2 & 31, December 27, 2004.
A bug eliminated March 14, 2005 (might cause compilation problems with some fortran compilers)
Attention: The model is based on data taken sunward from x=-15Re, and hence becomes invalid at larger tailward distances !!! *
REFERENCES:
(1) <NAME>, A new data-based model of the near magnetosphere magnetic field:
1. Mathematical structure.
2. Parameterization and fitting to observations. JGR v. 107(A8), 1176/1179, doi:10.1029/2001JA000219/220, 2002.
(2) <NAME>, <NAME>, <NAME>, Storm-time distortion of the
inner magnetosphere: How severe can it get ? JGR v. 108(A5), 1209, doi:10.1029/2002JA009808, 2003.
(3) <NAME> and <NAME>, Modeling the dynamics of the inner magnetosphere during
strong geomagnetic storms, J. Geophys. Res., v. 110 (A3), A03208, doi: 10.1029/2004JA010798, 2005.
"""
a = np.array([
1.00000,5.44118,0.891995,9.09684,0.00000,-7.18972,12.2700,
-4.89408,0.00000,0.870536,1.36081,0.00000,0.688650,0.602330,
0.00000,0.316346,1.22728,-0.363620E-01,-0.405821,0.452536,
0.755831,0.215662,0.152759,5.96235,23.2036,11.2994,69.9596,
0.989596,-0.132131E-01,0.985681,0.344212E-01,1.02389,0.207867,
1.51220,0.682715E-01,1.84714,1.76977,1.37690,0.696350,0.343280,
3.28846,111.293,5.82287,4.39664,0.383403,0.648176,0.318752E-01,
0.581168,1.15070,0.843004,0.394732,0.846509,0.916555,0.550920,
0.180725,0.898772,0.387365,2.26596,1.29123,0.436819,1.28211,
1.33199,.405553,1.6229,.699074,1.26131,2.42297,.537116,.619441])
iopgen,ioptt,iopb,iopr = [0.]*4
pdyn=parmod[0]
dst_ast=parmod[1]*0.8-13*np.sqrt(pdyn)
bximf,byimf,bzimf=[0.,parmod[2],parmod[3]]
w1,w2,w3,w4,w5,w6 = parmod[4:10]
pss,xx,yy,zz = [ps,x,y,z]
return extern(iopgen,ioptt,iopb,iopr,a,69,pdyn,dst_ast,bximf,byimf,bzimf,
w1,w2,w3,w4,w5,w6,pss,xx,yy,zz)
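# Illustrative call sketch (input values are arbitrary placeholders, not from a real event):
#   parmod = [2.0,    # pdyn [nPa]
#             -30.0,  # dst [nT]
#             3.0,    # By IMF [nT]
#             -5.0,   # Bz IMF [nT]
#             0.5, 0.5, 0.5, 0.5, 0.5, 0.5]   # w1..w6 storm-time indices
#   ps = 0.17  # dipole tilt angle [rad]
#   bx, by, bz = t04(parmod, ps, x=5.0, y=0.0, z=0.0)  # GSM position in Re, output field in nT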
def extern(iopgen,iopt,iopb,iopr,a,ntot,pdyn,dst,bximf,byimf,bzimf,w1,w2,w3,w4,w5,w6,ps,x,y,z):
"""
:param iopgen: general option flag:
iopgen=0 - calculate total field
iopgen=1 - dipole shielding only
iopgen=2 - tail field only
iopgen=3 - birkeland field only
iopgen=4 - ring current field only
iopgen=5 - interconnection field only
:param iopt: tail field flag:
iopt=0 - both modes
iopt=1 - mode 1 only
iopt=2 - mode 2 only
:param iopb: birkeland field flag:
iopb=0 - all 4 terms
iopb=1 - region 1, modes 1 and 2
iopb=2 - region 2, modes 1 and 2
:param iopr: ring current flag:
iopr=0 - both src and prc
iopr=1 - src only
iopr=2 - prc only
"""
# common /tail/ dxshift1,dxshift2,d,deltady ! the common blocks forward nonlinear parameters
# common /birkpar/ xkappa1,xkappa2
# common /rcpar/ sc_sy,sc_as,phi
# common /g/ g
# common /rh0/ rh0
global dxshift1, dxshift2, d, deltady
global xkappa1, xkappa2
global sc_sy, sc_pr, phi
global g
global rh0
a0_a,a0_s0,a0_x0 = [34.586,1.1960,3.4397] # Shue et al. parameters
dsig = 0.005
rh0,rh2 = [8.0,-5.2]
xappa = (pdyn/2.)**a[22] # overall scaling parameter
rh0 = 7.5 # tail hinging distance
g = 35.0 # tail warping parameter
xappa3=xappa**3
xx=x*xappa
yy=y*xappa
zz=z*xappa
sps = np.sin(ps)
"""
===========================================
WEDTM Demo
Inter and Intra Topic Structure Learning with Word Embeddings
He Zhao, <NAME>, <NAME>, <NAME>
Published in International Council for Machinery Lubrication 2018
===========================================
"""
# Author: <NAME> <<EMAIL>>; <NAME> <<EMAIL>>; <NAME> <<EMAIL>>
# License: BSD-3-Clause
import os
import copy
import time
import math
import warnings
import numpy as np
from ._basic_model import Basic_Model
from .._sampler import Basic_Sampler
from .._utils import *
from scipy import sparse
class WEDTM(Basic_Model):
def __init__(self, K: [list], device='gpu'):
"""
The basic model for WEDTM
Inputs:
K : [list] number of topics of each layer;
device : [str] 'cpu' or 'gpu';
Attributes:
@public:
global_params : [Params] the global parameters of the probabilistic model
local_params : [Params] the local parameters of the probabilistic model
@private:
_model_setting : [Params] the model settings of the probabilistic model
_hyper_params : [Params] the hyper parameters of the probabilistic model
"""
super(WEDTM, self).__init__()
setattr(self, '_model_name', 'WEDTM')
self._model_setting.K = K
self._model_setting.T = len(K)
self._model_setting.device = device
assert self._model_setting.device in ['cpu', 'gpu'], "Device Type Error: the device should be 'cpu' or 'gpu'"
self._sampler = Basic_Sampler(self._model_setting.device)
def initial(self, data):
'''
Inintial the parameters of WEDTM with the input documents
Inputs:
data : [np.ndarray] or [scipy.sparse.csc.csc_matrix] V*N matrix, N bag-of-words vectors with a vocabulary length of V
Attributes:
@public:
global_params.Phi : [np.ndarray] V*K matrix, K topics with a vocabulary length of V
local_params.Theta : [np.ndarray] N*K matrix, the topic propotions of N documents
@private:
_model_setting.V : [int] scalar, the length of the vocabulary
'''
self._model_setting.V = data.shape[0]
self.global_params.Phi = np.zeros((self._model_setting.K[0], self._model_setting.V)).astype(int)
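# Illustrative usage sketch (assumes `data` is a V*N bag-of-words matrix and `embeddings`
# is a V*D word-embedding matrix over the same vocabulary; the values are placeholders):
#   model = WEDTM(K=[100, 80, 50], device='gpu')
#   model.initial(data)
#   model.train(embeddings, S=3, iter_all=100, data=data)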
def train(self, embeddings: np.ndarray, S: int, iter_all: int, data: np.ndarray, is_train: bool = True):
'''
Inputs:
embeddings : [np.ndarray] V*D, word embedding of training words
S : [int] sub topics
iter_all : [int] scalar, the number of iterations of gibbs sampling
data : [np.ndarray] V*N_train matrix, N_train bag-of-words vectors with a vocabulary length of V
is_train : [bool] True or False, whether to update the global params in the probabilistic model
Attributes:
@public:
local_params.Theta : [np.ndarray] N_train*K matrix, the topic propotions of N_train documents
@private:
_model_setting.N : [int] scalar, the number of the documents in the corpus
_model_setting.Iteration : [int] scalar, the iterations of sampling
Outputs:
local_params : [Params] the local parameters of the probabilistic model
'''
assert type(data) is np.ndarray, 'Data type error: the input data should be a 2-D np.ndarray'
self._model_setting.Iteration = [iter_all] * self._model_setting.T
self._model_setting.N = data.shape[1]
# initial local paramters
self.local_params.Theta = np.zeros((self._model_setting.K[0], self._model_setting.N)).astype(int)
# WS the trained words' word index
# DS the trained words' doc index
# ZS the trained words' random theme
words_num = np.sum(data)
WS = np.zeros(words_num).astype(int)
DS = np.zeros(words_num).astype(int)
wi, di = np.where(data)
cc = data[wi, di]
pos = 0
for i in range(len(cc)):
WS[pos:pos+cc[i]] = wi[i]
DS[pos:pos+cc[i]] = di[i]
pos = pos+cc[i]
a0 = 0.01
b0 = 0.01
e0 = 1
f0 = 1
beta0 = 0.05
# Add the default word embedding
embeddings = np.insert(embeddings, embeddings.shape[1], values=np.ones(self._model_setting.V), axis=1)
self.Theta = [[]] * self._model_setting.T
c_j = [[]] * (self._model_setting.T + 1)
for t in range(self._model_setting.T + 1):
c_j[t] = np.ones((1, self._model_setting.N))
self.Phi = [{}] * self._model_setting.T
Xt_to_t1 = [[]] * self._model_setting.T
WSZS = [[]] * self._model_setting.T
paraGlobal = [{}] * self._model_setting.T
# Initialise beta for t = 1
beta1, self.beta_para = self._init_beta(self._model_setting.K[0], self._model_setting.V, S, embeddings, beta0)
for Tcurrent in range(self._model_setting.T):
if Tcurrent == 0: # layer 1, initial params.
ZS = np.random.randint(self._model_setting.K[Tcurrent], size=(len(DS))) # theme of each words
self.local_params.Theta = np.zeros((self._model_setting.K[Tcurrent], self._model_setting.N)).astype(int) # Theta (K,N) distribution of theme
for i in range(len(ZS)):
self.local_params.Theta[ZS[i], DS[i]] += 1
if is_train:
self.global_params.Phi = np.zeros((self._model_setting.K[Tcurrent], self._model_setting.V)).astype(int) # ZSWS Phi (K,V) distribution of words
for i in range(len(ZS)):
self.global_params.Phi[ZS[i], WS[i]] += 1
WSZS[Tcurrent] = self.global_params.Phi.T
Xt_to_t1[Tcurrent] = self.local_params.Theta
n_dot_k = np.sum(self.local_params.Theta, 1)  # total token count assigned to each topic
p_j = self._calculate_pj(c_j, Tcurrent)
r_k = 1 / self._model_setting.K[Tcurrent] * np.ones(self._model_setting.K[Tcurrent])
gamma0 = 1
c0 = 1
else:
self._model_setting.K[Tcurrent] = self._model_setting.K[Tcurrent - 1]
if self._model_setting.K[Tcurrent] <= 4:
break
self.Phi[Tcurrent] = np.random.rand(self._model_setting.K[Tcurrent - 1], self._model_setting.K[Tcurrent])
self.Phi[Tcurrent] = self.Phi[Tcurrent] / np.maximum(realmin, np.sum(self.Phi[Tcurrent], 0))
self.Theta[Tcurrent] = np.ones((self._model_setting.K[Tcurrent], self._model_setting.N)) / self._model_setting.K[Tcurrent]
p_j = self._calculate_pj(c_j, Tcurrent)
r_k = 1 / self._model_setting.K[Tcurrent] * np.ones(self._model_setting.K[Tcurrent])
gamma0 = self._model_setting.K[Tcurrent] / self._model_setting.K[1]
c0 = 1
for iter in range(1, self._model_setting.Iteration[Tcurrent]):
start_time = time.time()
for t in range(Tcurrent + 1):
if t == 0:
dex111 = list(range(len(ZS)))
np.random.shuffle(dex111)
ZS = ZS[dex111]
DS = DS[dex111]
WS = WS[dex111]
if Tcurrent == 0:
shape = np.dot(r_k.reshape(-1, 1), np.ones((1, self._model_setting.N)))
else:
shape = np.dot(self.Phi[1], self.Theta[1])
beta1_sum = np.sum(beta1, 1)
# Modified from GNBP_mex_collapsed_deep.c in the GBN code,
# to support a full matrix of beta1
[self.local_params.Theta, temp, n_dot_k, ZS] = self._collapsed_gibbs_topic_assignment_mex(
self.local_params.Theta, self.global_params.Phi, n_dot_k, ZS, WS, DS, shape, beta1, beta1_sum)
if is_train:
self.global_params.Phi = temp
WSZS[t] = self.global_params.Phi.T
Xt_to_t1[t] = self.local_params.Theta
# Sample the variables related to sub-topics
beta1 = self._sample_beta(WSZS[t].T, embeddings, beta1)
else:
[Xt_to_t1[t], WSZS[t]] = self._sampler.multi_aug(Xt_to_t1[t-1], self.Phi[t], self.Theta[t])
if t > 0:
self.Phi[t] = self._sample_Phi(WSZS[t], beta0)
if np.count_nonzero(np.isnan(self.Phi[t])):
Warning('Phi Nan')
self.Phi[t][np.isnan(self.Phi[t])] = 0
Xt = self._crt_sum_mex_matrix_v1(sparse.csc_matrix(Xt_to_t1[Tcurrent].T), r_k.reshape(1, -1).T).T
r_k, gamma0, c0 = self._sample_rk(Xt, r_k, p_j[Tcurrent+1], gamma0, c0)
if iter > 10:
if Tcurrent > 0:
p_j[1] = self._sampler.beta(np.sum(Xt_to_t1[0], 0)+a0, np.sum(self.Theta[1], 0)+b0)
else:
p_j[1] = self._sampler.beta(np.sum(Xt_to_t1[0], 0)+a0, np.sum(r_k)+b0)
p_j[1] = np.minimum(np.maximum(p_j[1], np.spacing(1)), 1-np.spacing(1))
c_j[1] = (1 - p_j[1]) / p_j[1]
for t in range(2, Tcurrent+2):
if t == Tcurrent+1:
c_j[t] = self._sampler.gamma(np.sum(r_k)*np.ones((1, self._model_setting.N))+e0) / (np.sum(self.Theta[t-1], 0)+f0)
else:
c_j[t] = self._sampler.gamma(np.sum(self.Theta[t], 0)+e0) / (np.sum(self.Theta[t-1], 0)+f0)
p_j_temp = self._calculate_pj(c_j, Tcurrent)
p_j[2:] = p_j_temp[2:]
for t in range(Tcurrent, -1, -1):
if t == Tcurrent:
shape = r_k.reshape(-1, 1)
else:
shape = np.dot(self.Phi[t+1], self.Theta[t+1])
if t > 0:
self.Theta[t] = self._sampler.gamma(shape+Xt_to_t1[t]) * (1/(c_j[t+1] - np.log(np.maximum(1 - p_j[t], realmin))))
# (100, 12337/987) (1, 12337)
if np.count_nonzero(np.isnan(self.Theta[t])):
Warning('Theta Nan')
self.Theta[t][np.isnan(self.Theta[t])] = 0
end_time = time.time()
stages = 'Training' if is_train else 'Testing'
print(f'{stages} Stage: ',
f'Layer {Tcurrent:3d}, epoch {iter:3d} takes {end_time - start_time:.2f} seconds, topics {np.count_nonzero(Xt):3d}')
for t in range(Tcurrent + 1):
if t == 0:
self.Phi[t] = self._sample_Phi(WSZS[t], beta1.T, True)
else:
self.Phi[t] = self._sample_Phi(WSZS[t], beta0, True)
paraGlobal[Tcurrent]['Phi'] = self.Phi
paraGlobal[Tcurrent]['r_k'] = r_k
paraGlobal[Tcurrent]['gamma0'] = gamma0
paraGlobal[Tcurrent]['c0'] = c0
paraGlobal[Tcurrent]['K'] = self._model_setting.K[:Tcurrent]
paraGlobal[Tcurrent]['beta0'] = beta0
paraGlobal[Tcurrent]['beta_para'] = self.beta_para
paraGlobal[Tcurrent]['p_j'] = p_j # for theta
paraGlobal[Tcurrent]['c_j'] = c_j
paraGlobal[Tcurrent]['Xt_to_t1'] = Xt_to_t1
paraGlobal[Tcurrent]['cjmedian'] = []
for t in range(Tcurrent + 1):
paraGlobal[Tcurrent]['cjmedian'].append(np.median(c_j[t]))
return copy.deepcopy(self.local_params)
def test(self, embeddings: np.ndarray, S: int, iter_all: list, data: np.ndarray):
'''
Inputs:
embeddings : [np.ndarray] V*D, word embedding of training words
S : [int] number of sub-topics
iter_all : [int] scalar, the number of Gibbs sampling iterations
data : [np.ndarray] V*N_train matrix, N_train bag-of-words vectors with a vocabulary length of V
Outputs:
local_params : [Params] the local parameters of the probabilistic model
'''
local_params = self.train(embeddings, S, iter_all, data, is_train=False)
return local_params
def save(self, model_path: str = './save_models'):
'''
Save the model to the specified directory.
Inputs:
model_path : [str] the directory path to save the model, default './save_models/WEDTM.npy'
'''
# create the directory path
if not os.path.isdir(model_path):
os.mkdir(model_path)
# save the model
model = {}
for params in ['global_params', 'local_params', '_model_setting', '_hyper_params']:
if params in dir(self):
model[params] = getattr(self, params)
np.save(model_path + '/' + self._model_name + '.npy', model)
print('model has been saved to ' + model_path + '/' + self._model_name + '.npy')
def load(self, model_path: str):
'''
Load the model parameters from the specified directory
Inputs:
model_path : [str] the directory path to load the model;
'''
assert os.path.exists(model_path), 'Path Error: can not find the path to load the model'
model = np.load(model_path, allow_pickle=True).item()
for params in ['global_params', 'local_params', '_model_setting', '_hyper_params']:
if params in model:
setattr(self, params, model[params])
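# Hedged usage sketch (comment only; class name per the __init__ above, while topic sizes,
# iteration counts and variable names below are placeholder assumptions, not from the source):
#   model = WEDTM(K=[100, 80, 50], device='gpu')   # three-layer topic sizes
#   model.initial(train_bow)                        # train_bow: V*N bag-of-words count matrix
#   theta_train = model.train(embeddings, S=3, iter_all=100, data=train_bow).Theta
#   theta_test = model.test(embeddings, S=3, iter_all=100, data=test_bow).Theta
#   model.save('./save_models')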
def _init_beta(self, K, V, S, embeddings, beta):
L = embeddings.shape[1]
beta_para = [{}] * S
for s in range(S):
# variables for sub-topic s
beta_para[s]['beta_s'] = beta/S * np.ones((K, V))
beta_para[s]['alpha_k'] = 0.1 * np.ones((K, 1))
beta_para[s]['W'] = 0.1 * np.ones((K, L))
beta_para[s]['pi'] = np.dot(beta_para[s]['W'], embeddings.T)
beta_para[s]['sigma'] = np.ones((K, L))
beta_para[s]['c0'] = 1
beta_para[s]['alpha0'] = 1
beta1 = beta * np.ones((K, V))
return beta1, beta_para
def _calculate_pj(self, c_j, T):
'''
calculate p_j from layer 1 to T+1
same as pfa
'''
p_j = [[]] * (T+2)
N = len(c_j[1])
p_j[0] = (1-np.exp(-1)) * np.ones((1, N))
p_j[1] = 1/(1 + c_j[1])
for t in range(2, T+2):
temp = -np.log(np.maximum(1-p_j[t - 1], realmin))
p_j[t] = temp / (temp + c_j[t])
if np.count_nonzero(np.isnan(p_j[t])):
Warning('pj Nan')
p_j[t][np.isnan(p_j[t])] = np.spacing(1)
return p_j
def _collapsed_gibbs_topic_assignment_mex(self, ZSDS, ZSWS, n_dot_k, ZS, WS, DS, shape, eta, eta_sum):
'''
same as DirBN
'''
Ksize, Nsize = ZSDS.shape
WordNum = WS.shape[0]
prob_cumsum = np.zeros((Ksize, 1))
for i in range(WordNum):
v = WS[i]
j = DS[i]
k = ZS[i]
if ZS[i] > -1:
ZSDS[k, j] -= 1
ZSWS[k, v] -= 1
n_dot_k[k] -= 1
cum_sum = 0
for k in range(Ksize):
cum_sum += (eta[k, v] + ZSWS[k, v]) / (eta_sum[k] + n_dot_k[k]) * (ZSDS[k, j] + shape[k, j])
prob_cumsum[k] = cum_sum
probrnd = np.random.rand() * cum_sum
k = self._binary_search(probrnd, prob_cumsum, Ksize)
ZS[i] = k
ZSDS[k, j] += 1
ZSWS[k, v] += 1
n_dot_k[k] += 1
return ZSDS, ZSWS, n_dot_k, ZS
def _binary_search(self, probrnd, prob_cumsum, Ksize):
if probrnd <= prob_cumsum[0]:
return 0
else:
kstart = 1
kend = Ksize - 1
while 1:
if kstart >= kend:
return kend
else:
k = kstart + int((kend - kstart) / 2)
if (prob_cumsum[k - 1][0] > probrnd) & (prob_cumsum[k][0] > probrnd):
kend = k - 1
elif (prob_cumsum[k - 1][0] < probrnd) & (prob_cumsum[k][0] < probrnd):
kstart = k + 1
else:
return k
return k
def _sample_beta(self, n_topic_word, F, beta1):
a0 = 0.01
b0 = 0.01
e0 = 1
f0 = 1
S = len(self.beta_para)
L = F.shape[1]
# The word count for each v and k in the first layer
[K, V] = n_topic_word.shape
n_sum = np.sum(n_topic_word, 1)
## Eq. (3)
log_inv_q = -np.log(self._sampler.beta(np.sum(beta1, 1), np.maximum(n_sum, realmin)))
log_log_inv_q = np.log(np.maximum(log_inv_q, realmin))
# Active topics in the first layer
active_k = (~np.isnan(log_inv_q)) & (~np.isinf(log_inv_q)) & (n_sum > 0) & (log_inv_q != 0)
## Eq. (4) and (6)
h = np.zeros((K, V, S)).astype(int)
for k in range(K):
for v in range(V):
for j in range(n_topic_word[k, v]):
if j == 0:
is_add_table = 1
else:
is_add_table = (np.random.rand() < beta1[k, v] / (beta1[k, v] + j + 1))
if is_add_table > 0:
p = np.zeros((S, 1))
for s in range(S):
p[s] = self.beta_para[s]['beta_s'][k, v]
sum_cum = np.cumsum(p)
temp = np.argwhere(sum_cum > np.random.rand() * sum_cum[-1])
if len(temp) > 0:
ss = temp[0]
else:
continue
h[k, v, ss] = h[k, v, ss] + 1
beta1 = 0
for s in range(S):
## For each sub-topic s
alpha_k = self.beta_para[s]['alpha_k']
pi_pg = self.beta_para[s]['pi']
W = self.beta_para[s]['W']
c0 = self.beta_para[s]['c0']
alpha0 = self.beta_para[s]['alpha0']
h_s = h[:, :, s]
# Sample alpha_k for each sub-topic s with the hierarchical gamma
h_st = np.zeros((K, V)).astype(int)
# Eq. (11)
h_st[h_s > 0] = 1
for k in range(K):
for v in range(V):
for j in range(h_s[k, v] - 1):
h_st[k, v] = h_st[k, v] + (np.random.rand() < alpha_k[k] / (alpha_k[k] + j + 1)).astype(int)
# Eq. (10)
h_st_dot = np.sum(h_st, 1)
# Active topics in each sub-topic s
local_active_k = (h_st_dot > 0) & active_k
l_a_K = sum(local_active_k)
x = pi_pg + log_log_inv_q.reshape(-1, 1)
dex = x < 0
temp = np.zeros(x.shape)
temp[dex] = np.log1p(np.exp(x[dex]))
temp[~dex] = x[~dex]+np.log1p(np.exp(-x[~dex]))
temp = np.sum(temp, 1)
# Eq. (9)
alpha_k = (self._sampler.gamma(alpha0 / l_a_K + h_st_dot) / (c0 + temp)).reshape(-1, 1)
h_stt = np.zeros((K, 1))
h_stt[h_st_dot > 0] = 1
for k in range(K):
for j in range(h_st_dot[k] - 1):
h_stt[k] = h_stt[k] + (np.random.rand() < (alpha0 / l_a_K) / (alpha0 / l_a_K + j + 1)).astype(int)
temp2 = temp / (c0 + temp)
# L17 in Figure 1 in the appendix
alpha0 = self._sampler.gamma(a0 + np.sum(h_stt)) / (b0 - np.sum( | np.log(1 - temp2[local_active_k]) | numpy.log |
#--- plot all the features in 2d by taking 2 features at one time ---#
#--- import required libraries ---#
get_ipython().magic('matplotlib inline')
import csv
import scipy
import sklearn
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
from PIL import Image
from mpl_toolkits.mplot3d import Axes3D
import mpl_toolkits.mplot3d.axes3d as p3
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
#--- read csv file ---#
with open('random_rows.csv', 'r') as f:
reader = csv.reader(f)
csv_values = list(reader)
#--- convert data type from string to float ---#
def read_lines():
with open('random_rows.csv', 'rU') as data:
reader = csv.reader(data)
for row in reader:
yield [ float(i) for i in row ]
#--- divide observations into training and testing data ---#
def observations(l):
a = 2*(len(l)//3) + (len(l)%3)
return(l[:a],l[a:])
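# Hedged usage sketch (assumes the csv parsed by read_lines() above; names are illustrative):
def _example_split():
    rows = list(read_lines())                    # every row as a list of floats
    train_rows, test_rows = observations(rows)   # roughly first 2/3 train, last 1/3 test
    return train_rows, test_rows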
#--- values for meshgrid ---#
xx, yy= np.meshgrid( | np.linspace(-5, 5, 100) | numpy.linspace |
import os
import pytest
from numpy.testing import assert_allclose
from numpy import ones, zeros, float64, array, append, genfromtxt
from numpy.linalg import LinAlgError
from touvlo.lin_rg import (normal_eqn, cost_func, reg_cost_func, grad,
reg_grad, predict, h, LinearRegression,
RidgeLinearRegression, reg_normal_eqn)
from touvlo.utils import numerical_grad
TESTDATA1 = os.path.join(os.path.dirname(__file__), 'data1.csv')
TESTDATA2 = os.path.join(os.path.dirname(__file__), 'data2.csv')
class TestLinearRegression:
@pytest.fixture(scope="module")
def data1(self):
return genfromtxt(TESTDATA1, delimiter=',')
@pytest.fixture(scope="module")
def data2(self):
return genfromtxt(TESTDATA2, delimiter=',')
@pytest.fixture(scope="module")
def err(self):
return 1e-4
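# err is the finite-difference step size passed to numerical_grad in the gradient checks below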
# NORMAL EQUATION
def test_normal_eqn_data1(self, data1):
y = data1[:, -1:]
X = data1[:, :-1]
m, _ = X.shape
intercept = ones((m, 1), dtype=int)
X = append(intercept, X, axis=1)
assert_allclose([[-3.896], [1.193]],
normal_eqn(X, y),
rtol=0, atol=0.001)
def test_normal_eqn_data2(self, data2):
y = data2[:, -1:]
X = data2[:, :-1]
m, _ = X.shape
intercept = ones((m, 1), dtype=int)
X = append(intercept, X, axis=1)
assert_allclose([[89597.909], [139.210], [-8738.019]],
normal_eqn(X, y),
rtol=0, atol=0.001)
def test_reg_normal_eqn_data1_1(self, data1):
y = data1[:, -1:]
X = data1[:, :-1]
m, _ = X.shape
intercept = ones((m, 1), dtype=int)
X = append(intercept, X, axis=1)
_lambda = 0
assert_allclose([[-3.896], [1.193]],
reg_normal_eqn(X, y, _lambda),
rtol=0, atol=0.001)
def test_reg_normal_eqn_data1_2(self, data1):
y = data1[:, -1:]
X = data1[:, :-1]
m, _ = X.shape
intercept = ones((m, 1), dtype=int)
X = append(intercept, X, axis=1)
_lambda = 1
assert_allclose([[-3.889], [1.192]],
reg_normal_eqn(X, y, _lambda),
rtol=0, atol=0.001)
def test_reg_normal_eqn_data2(self, data2):
y = data2[:, -1:]
X = data2[:, :-1]
m, _ = X.shape
intercept = ones((m, 1), dtype=int)
X = append(intercept, X, axis=1)
_lambda = 100
assert_allclose([[74104.492], [135.249], [-1350.731]],
reg_normal_eqn(X, y, _lambda),
rtol=0, atol=0.001)
def test_normal_eqn_singular(self, data2):
y = array([[0], [0], [0]])
X = array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
m, _ = X.shape
intercept = ones((m, 1), dtype=int)
X = append(intercept, X, axis=1)
with pytest.raises(LinAlgError) as excinfo:
normal_eqn(X, y)
msg = excinfo.value.args[0]
assert msg == ("Singular matrix")
def test_reg_normal_eqn_singular1(self, data2):
y = array([[0], [0], [0]])
X = array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
m, _ = X.shape
intercept = ones((m, 1), dtype=int)
X = append(intercept, X, axis=1)
_lambda = 0
with pytest.raises(LinAlgError) as excinfo:
reg_normal_eqn(X, y, _lambda),
msg = excinfo.value.args[0]
assert msg == ("Singular matrix")
def test_reg_normal_eqn_singular2(self, data2):
y = array([[0], [0], [0]])
X = array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
m, _ = X.shape
intercept = ones((m, 1), dtype=int)
X = append(intercept, X, axis=1)
_lambda = 0.1
assert_allclose([[0], [0], [0], [0]],
reg_normal_eqn(X, y, _lambda),
rtol=0, atol=0.001)
# COST FUNCTION
def test_cost_func_data1_1(self, data1):
y = data1[:, -1:]
X = data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = zeros((n + 1, 1), dtype=float64)
assert_allclose([[32.073]],
cost_func(X, y, theta),
rtol=0, atol=0.001)
def test_cost_func_data1_2(self, data1):
y = data1[:, -1:]
X = data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = ones((n + 1, 1), dtype=float64)
assert_allclose([[10.266]],
cost_func(X, y, theta),
rtol=0, atol=0.001)
def test_cost_func_data1_3(self, data1):
y = data1[:, -1:]
X = data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = array([[-1], [2]])
assert_allclose([[54.242]],
cost_func(X, y, theta),
rtol=0, atol=0.001)
def test_cost_func_data2_1(self, data2):
y = data2[:, -1:]
X = data2[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = zeros((n + 1, 1), dtype=float64)
assert_allclose([[65591548106.457]],
cost_func(X, y, theta),
rtol=0, atol=0.001)
def test_cost_func_data2_2(self, data2):
y = data2[:, -1:]
X = data2[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = ones((n + 1, 1), dtype=float64)
assert_allclose([[64828197300.798]],
cost_func(X, y, theta),
rtol=0, atol=0.001)
def test_cost_func_data2_3(self, data2):
y = data2[:, -1:]
X = data2[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = array([[-25.3], [32], [7.8]])
assert_allclose([[43502644952.311]],
cost_func(X, y, theta),
rtol=0, atol=0.001)
# REGULARIZED COST FUNCTION
def test_reg_cost_func_data1_1(self, data1):
y = data1[:, -1:]
X = data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = ones((n + 1, 1), dtype=float64)
_lambda = 0
assert_allclose([[10.266]],
reg_cost_func(X, y, theta, _lambda),
rtol=0, atol=0.001)
def test_reg_cost_func_data1_2(self, data1):
y = data1[:, -1:]
X = data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = ones((n + 1, 1), dtype=float64)
_lambda = 100
assert_allclose([[10.781984]],
reg_cost_func(X, y, theta, _lambda),
rtol=0, atol=0.001)
def test_reg_cost_func_data1_3(self, data1):
y = data1[:, -1:]
X = data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = array([[-1], [2]])
_lambda = 750
assert_allclose([[69.706373]],
reg_cost_func(X, y, theta, _lambda),
rtol=0, atol=0.001)
def test_reg_cost_func_data2_1(self, data2):
y = data2[:, -1:]
X = data2[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = ones((n + 1, 1), dtype=float64)
_lambda = 0
assert_allclose([[64828197300.798]],
reg_cost_func(X, y, theta, _lambda),
rtol=0, atol=0.001)
def test_reg_cost_func_data2_2(self, data2):
y = data2[:, -1:]
X = data2[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = ones((n + 1, 1), dtype=float64)
_lambda = 1000000
assert_allclose([[64828218577.393623]],
reg_cost_func(X, y, theta, _lambda),
rtol=0, atol=0.001)
def test_reg_cost_func_data2_3(self, data2):
y = data2[:, -1:]
X = data2[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = array([[-25.3], [32], [7.8]])
_lambda = 1000000
assert_allclose([[43514185803.375198]],
reg_cost_func(X, y, theta, _lambda),
rtol=0, atol=0.001)
# GRADIENT
def test_grad_data1_1(self, data1):
y = data1[:, -1:]
X = data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = zeros((n + 1, 1), dtype=float64)
assert_allclose([[-5.839], [-65.329]],
grad(X, y, theta),
rtol=0, atol=0.001)
def test_grad_data1_2(self, data1):
y = data1[:, -1:]
X = data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = ones((n + 1, 1), dtype=float64)
assert_allclose([[3.321], [24.235]],
grad(X, y, theta),
rtol=0, atol=0.001)
def test_grad_data1_3(self, data1):
y = data1[:, -1:]
X = data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = array([[-1], [2]])
assert_allclose([[9.480], [89.319]],
grad(X, y, theta),
rtol=0, atol=0.001)
def test_grad_data1_4(self, data1, err):
y = data1[:, -1:]
X = data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = (1 / 3) * ones((n + 1, 1), dtype=float64)
def J(theta):
return cost_func(X, y, theta)
assert_allclose(grad(X, y, theta),
numerical_grad(J, theta, err),
rtol=0, atol=0.001)
def test_grad_data1_5(self, data1, err):
y = data1[:, -1:]
X = data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = - 7.43 * ones((n + 1, 1), dtype=float64)
def J(theta):
return cost_func(X, y, theta)
assert_allclose(grad(X, y, theta),
numerical_grad(J, theta, err),
rtol=0, atol=0.001)
def test_grad_data1_6(self, data1, err):
y = data1[:, -1:]
X = data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = array([[3.46], [-2.76]])
def J(theta):
return cost_func(X, y, theta)
assert_allclose(grad(X, y, theta),
numerical_grad(J, theta, err),
rtol=0, atol=0.001)
def test_grad_data2_1(self, data2):
y = data2[:, -1:]
X = data2[:, :-1]
m, n = X.shape
intercept = | ones((m, 1), dtype=float64) | numpy.ones |
import numpy as np
import os
import time
import sys
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from astropy.io import fits
from deprojectVis import deprojectVis
from discreteModel import discreteModel
# I/O
filename = 'data/blind2_fo.combo.noisy.image.fits'
# radial bins setup
nbins = 20
b = 0.05 + 0.05*np.arange(nbins)
a = np.roll(b, 1)
rin = 0.1/140.
a[0] = rin
cb = 0.5*(a+b)
bins = rin, b
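# annulus i spans [a[i], b[i]) arcsec and cb[i] is its centre; bins packs (inner radius, outer edges)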
# - Fiducial guess for SB(r) based on synthesized image
# load synthesized image data
hdulist = fits.open(filename)
data_image = np.squeeze(hdulist[0].data)
hdr = hdulist[0].header
# define grid/coordinate system
RA_cen = hdr['CRVAL1']
DEC_cen = hdr['CRVAL2']
dRA = hdr['CDELT1']
dDEC = hdr['CDELT2']
nRA = hdr['NAXIS1']
nDEC = hdr['NAXIS2']
RA = RA_cen + dRA * (np.arange(nRA) - (hdr['CRPIX1']-1))
DEC = DEC_cen + dDEC * (np.arange(nDEC) - (hdr['CRPIX2']-1))
RAo, DECo = np.meshgrid(RA-RA_cen, DEC-DEC_cen)
# lots of important stuff happens...
# radial profile
radius = 3600.*np.sqrt(RAo**2 + DECo**2)
# average surface brightness in each bin
guess_sb = np.zeros_like(cb)
for i in range(len(cb)):
guess_sb[i] = np.mean(data_image[(radius > a[i]) & (radius < b[i])])
# scale to units of Jy per square arcsec
omega_beam = np.pi*(3600.**2)*hdr['BMAJ']*hdr['BMIN']/(4.*np.log(2.))
guess_sb /= omega_beam
# truth
rtruth = np.logspace(np.log10(rin), 0.36, num=500)
Ic = 0.0824975
rc = 55./140.
SBtruth = Ic * (rc/rtruth)
SBtruth[rtruth > rc] = Ic * (rtruth[rtruth > rc]/rc)**(-4.)
# initialize walkers
ndim, nwalkers, nthreads = nbins+4, 80, 8
sb_scl = 0.5*np.ones_like(guess_sb)
#sb_scl[cb < 0.5*3600.*hdr['BMAJ']] = 0.9
unrslvd = np.where(cb < 0.5*3600.*hdr['BMAJ'])
p0 = np.zeros((nwalkers, ndim))
guess_geo = np.zeros(4)
geo_scl = np.ones_like(guess_geo)
geo_scl[2:] = 0.05
for i in range(nwalkers):
mono = False
indx = 0
while (mono == False):
sbtrial = guess_sb*(1.+sb_scl*np.random.uniform(-1, 1, nbins))
sbtrial[unrslvd] = guess_sb[unrslvd] * \
(1.+3.*np.random.uniform(0, 1, len(unrslvd)))
mono = np.array_equal(np.sort(sbtrial), sbtrial[::-1])
indx += 1
geotrial = guess_geo[0:2]+geo_scl[0:2]*np.random.uniform(0, 1, 2)
offtrial = guess_geo[2:4]+geo_scl[2:4]*np.random.uniform(-1, 1, 2)
p0[i][:] = np.concatenate((geotrial, offtrial, sbtrial))
# plot initial ball of guesses for surface brightness profile
plt.axis([0.01, 1.5, 1e-4, 1e1])
plt.loglog(radius, data_image/omega_beam, '.y', alpha=0.01)
plt.loglog(rtruth, SBtruth, 'k-', cb, guess_sb, 'oc')
for i in range(nwalkers):
plt.loglog(cb, p0[i][4:], '-r', alpha=0.1)
plt.xlabel('radius [arcsec]')
plt.ylabel('surface brightness [Jy/arcsec**2]')
plt.savefig('blind2_fo.profile.png')
plt.close()
# load the "data" visibilities
data = np.load('data/blind2_fo.340GHz.vis.npz')
freq = 340e9
u = 1e-3*data['u']*freq/2.9979e8
v = 1e-3*data['v']*freq/2.9979e8
real = data['Re']
imag = data['Im']
wgt = 10000.*data['Wt']
# deproject
incl = 0.
PA = 0.
offset = | np.array([0., 0.]) | numpy.array |
import numpy as np
import pyqtgraph as pg
from PyQt5 import QtCore
from acconeer_utils.clients.reg.client import RegClient
from acconeer_utils.clients.json.client import JSONClient
from acconeer_utils.clients import configs
from acconeer_utils import example_utils
from acconeer_utils.pg_process import PGProcess, PGProccessDiedException
def main():
args = example_utils.ExampleArgumentParser(num_sens=1).parse_args()
example_utils.config_logging(args)
if args.socket_addr:
client = JSONClient(args.socket_addr)
else:
port = args.serial_port or example_utils.autodetect_serial_port()
client = RegClient(port)
config = get_base_config()
config.sensor = args.sensors
client.setup_session(config)
pg_updater = PGUpdater(config)
pg_process = PGProcess(pg_updater)
pg_process.start()
client.start_streaming()
interrupt_handler = example_utils.ExampleInterruptHandler()
print("Press Ctrl-C to end session")
processor = PresenceDetectionProcessor(config)
while not interrupt_handler.got_signal:
info, sweep = client.get_next()
plot_data = processor.process(sweep)
if plot_data is not None:
try:
pg_process.put_data(plot_data)
except PGProccessDiedException:
break
print("Disconnecting...")
pg_process.close()
client.disconnect()
def get_base_config():
config = configs.IQServiceConfig()
config.range_interval = [0.3, 0.9]
config.sweep_rate = 40
config.gain = 0.7
return config
class PresenceDetectionProcessor:
def __init__(self, config):
self.config = config
self.movement_history = np.zeros(5 * config.sweep_rate) # 5 seconds
self.a_fast_tau = 0.1
self.a_slow_tau = 1
self.a_move_tau = 1
self.a_fast = self.alpha(self.a_fast_tau, 1.0/config.sweep_rate)
self.a_slow = self.alpha(self.a_slow_tau, 1.0/config.sweep_rate)
self.a_move = self.alpha(self.a_move_tau, 1.0/config.sweep_rate)
self.sweep_lp_fast = None
self.sweep_lp_slow = None
self.movement_lp = 0
self.sweep_index = 0
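# NOTE: self.alpha(tau, dt) is defined outside this excerpt; a common choice for such an
# exponential-smoothing factor is alpha = exp(-dt / tau) (assumption, not shown in the source)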
def process(self, sweep):
if self.sweep_index == 0:
self.sweep_lp_fast = np.array(sweep)
self.sweep_lp_slow = np.array(sweep)
out_data = None
else:
self.sweep_lp_fast = self.sweep_lp_fast*self.a_fast + sweep*(1-self.a_fast)
self.sweep_lp_slow = self.sweep_lp_slow*self.a_slow + sweep*(1-self.a_slow)
movement = np.mean(np.abs(self.sweep_lp_fast - self.sweep_lp_slow))
movement *= 100
self.movement_lp = self.movement_lp*self.a_move + movement*(1-self.a_move)
self.movement_history = np.roll(self.movement_history, -1)
self.movement_history[-1] = self.movement_lp
out_data = {
"envelope": | np.abs(self.sweep_lp_fast) | numpy.abs |
#CSTAT+ A GPU-accelerated spatial pattern analysis algorithm for high-resolution 2D/3D hydrologic connectivity using array vectorization and convolutional neural network
#Author: <NAME>, <NAME>
#Department of Earth, Atmospheric and Planetary Sciences, Purdue University, 550 Stadium Mall Dr, West Lafayette, IN 47907 USA.
#Email: <EMAIL>; Alternative: <EMAIL>
#This is the omnidirectional version: CSTAT+/OMNI
import os
from osgeo import gdal
import numpy as np
import copy as cp
from numpy import genfromtxt as gft
from scipy.ndimage.measurements import label
from itertools import combinations_with_replacement,product
from mxnet import nd,gpu
from timeit import default_timer as timer
import pandas as pd
#Binarize pattern
def prep(expe0,threshold,NoData):
#Provide threshold for High/Low, usually the depth of shallow sheetflow
expe1=cp.deepcopy(expe0)
expe2=cp.deepcopy(expe0)
expe1[(expe1>=threshold)]=1
expe1[(expe1<threshold)]=0
expe2[(expe2==NoData)]=-1
expe2[(expe2>0)]=0
connection_structure = np.array([[1,1,1],[1,1,1],[1,1,1]])
expela, num_features =label (expe1,structure=connection_structure)
expe3=expe2+expela
return (expe3)
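# Hedged usage sketch (file name, threshold and NoData value are placeholder assumptions):
def _example_prep():
    depth = np.loadtxt("depth_grid.csv", delimiter=",")   # 2D water-depth raster
    labeled = prep(depth, threshold=0.01, NoData=-9999)
    # labeled: -1 = NoData, 0 = Low, 1..n = connected High regions (8-connectivity)
    return labeled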
def itercontrol(regions,k,bins,dibins,dibins4,binnum):
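# k selects the pairing mode: k == 2 -> pairs within one connected region (with directions),
# k == 1 -> all pairs among High cells, otherwise -> High-to-Low cross pairs (see compu below)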
#Initiate empty array for storing histogram for directions, distances, and number of counted pairs in each distance range bin
co0=nd.zeros(binnum-1,gpu(0),dtype="float32")
codi0=nd.zeros((4,binnum-1),gpu(0),dtype="float32")
count0=nd.zeros(binnum-1,gpu(0),dtype="float32")
count4=nd.zeros((4,binnum-1),gpu(0),dtype="float32")
co4=nd.zeros((4,binnum-1),gpu(0),dtype="float32")
bins=nd.array(bins,gpu(0))
dibins=nd.array(dibins,gpu(0))
dibins4=nd.array(dibins4,gpu(0))
if k==2:
#Create segment index for the input array to meet the memory requirement
imax=list(range(int(regions.shape[0]/broadcdp)+(regions.shape[0]%broadcdp!=0)))
#Combinations with repeated indicies
iterator=list(combinations_with_replacement(imax,2))
for i in iterator:
if i[0]==i[1]:
vout=distanceAA2(regions,i,binnum,dibins,dibins4)
co0+=vout[0]
codi0+=vout[1]
count0+=vout[2]
co4+=vout[3]
count4+=vout[4]
else:
vout=distanceAA1(regions,i,binnum,dibins,dibins4)
co0+=vout[0]
codi0+=vout[1]
count0+=vout[2]
co4+=vout[3]
count4+=vout[4]
return (co0.asnumpy(),codi0.asnumpy(),count0.asnumpy(),co4.asnumpy(),count4.asnumpy())
elif k==1:
#Create segment index for the input array to meet the memory requirement
imax=list(range(int(regions.shape[0]/broadcdp)+(regions.shape[0]%broadcdp!=0)))
#Combinations with repeated indicies
iterator=list(combinations_with_replacement(imax,2))
for i in iterator:
if i[0]==i[1]:
count0+=distance2(regions,i,binnum,bins)
else:
count0+=distance1(regions,i,binnum,bins)
return (count0.asnumpy())
else:
#Unpack the tuple
regions_high,regions_low=regions
#Create segment index for the input array to meet the memory requirement
imax_high=list(range(int(regions_high.shape[0]/broadcdp)+(regions_high.shape[0]%broadcdp!=0)))
imax_low=list(range(int(regions_low.shape[0]/broadcdp)+(regions_low.shape[0]%broadcdp!=0)))
#Combinations with repeated indicies
iterator=list(product(imax_high,imax_low))
for i in iterator:
count0+=distance11(regions_high,regions_low,i,binnum,bins)
return (count0.asnumpy())
def distanceAA1(regions,i,binnum,dibins,dibins4):
#Initiate empty array for storing histogram for directions, distances, and number of counted pairs in each distance range bin
co0=nd.zeros(binnum-1,gpu(0),dtype="float32")
codi0=nd.zeros((5,binnum-1),gpu(0),dtype="float32")
count0=nd.zeros(binnum-1,gpu(0),dtype="float32")
count4=nd.zeros((5,binnum-1),gpu(0),dtype="float32")
co4=nd.zeros((5,binnum-1),gpu(0),dtype="float32")
#Calculate index coordinates and directions by chunks
a=regions[i[0]*broadcdp:min((i[0]+1)*broadcdp,regions.shape[0]),:]
b=regions[i[1]*broadcdp:min((i[1]+1)*broadcdp,regions.shape[0]),:]
a1=nd.array(a,gpu(0))
b1=nd.array(b,gpu(0))
a1_b1=(nd.expand_dims(a1,axis=1)-b1).reshape((-1,2))
x1_x2=a1_b1[:,0]
y1_y2=a1_b1[:,1]
labels=nd.zeros(x1_x2.shape[0],gpu(0),dtype="float32")
sdi0=(nd.degrees(nd.arctan((y1_y2)/(x1_x2)))+90).reshape((-1,))
ldis=nd.broadcast_hypot(x1_x2,y1_y2).reshape((-1,))
#Change 0 to 180 so it can apply sum of boolean mask without losing values
sdi0=nd.where(condition=(sdi0==0),x=labels+180,y=sdi0)
#Store sum of distances co0 and histogram of directions in each range bin
for p in range (0,binnum-1):
booleanmask=nd.equal((ldis>=bins[p]),(ldis<bins[p+1]))
count0[p]+=nd.nansum(booleanmask)
co0[p]+=nd.nansum(ldis*booleanmask)
#Exclude values not in distance range bin
sdi1=nd.where(condition=(booleanmask==0),x=labels-1,y=sdi0)
for q in range (0,5):
booleanmaskdi=nd.equal((sdi1>=dibins[q]),(sdi1<dibins[q+1]))
codi0[q,p]+=nd.nansum(booleanmaskdi)
for k in range (0,5):
booleanmaskdi=nd.equal((sdi0>=dibins4[k]),(sdi0<dibins4[k+1]))
ldis0=ldis*booleanmaskdi
for l in range (0,binnum-1):
booleanmask=nd.equal((ldis0>=bins[l]),(ldis0<bins[l+1]))
count4[k,l]+=nd.nansum(booleanmask)
co4[k,l]+=nd.nansum(ldis0*booleanmask)
codi0[0,:]+=codi0[4,:]
codi0=codi0[0:4,:]
count4[0,:]+=count4[4,:]
count4=count4[0:4,:]
co4[0,:]+=co4[4,:]
co4=co4[0:4,:]
return(co0,codi0,count0,co4,count4)
def distanceAA2(regions,i,binnum,dibins,dibins4):
#Initiate empty array for storing histogram for directions, distances, and number of counted pairs in each distance range bin
co0=nd.zeros(binnum-1,gpu(0),dtype="float32")
codi0=nd.zeros((5,binnum-1),gpu(0),dtype="float32")
count0=nd.zeros(binnum-1,gpu(0),dtype="float32")
count4=nd.zeros((5,binnum-1),gpu(0),dtype="float32")
co4=nd.zeros((5,binnum-1),gpu(0),dtype="float32")
seed=nd.zeros((1,2),gpu(0))
#Calculate index coordinates and directions by chunks
a=regions[i[0]*broadcdp:min((i[0]+1)*broadcdp,regions.shape[0]),:]
b=regions[i[1]*broadcdp:min((i[1]+1)*broadcdp,regions.shape[0]),:]
a1=nd.array(a,gpu(0))
b1=nd.array(b,gpu(0))
# print ("a1",a1,"b1",b1)
for ii in range (a1.shape[0]-1):
a1_b1=(nd.expand_dims(a1[ii].reshape((1,2)),axis=1)-b1[ii+1:,:]).reshape((a1[ii+1:,:].shape[0],2))
seed=nd.concat(seed,a1_b1,dim=0)
if seed.shape[0]>1:
x1_x2=seed[1:,0]
y1_y2=seed[1:,1]
labels=nd.zeros(x1_x2.shape[0],gpu(0),dtype="float32")
sdi0=(nd.degrees(nd.arctan((y1_y2)/(x1_x2)))+90).reshape((-1,))
ldis=nd.broadcast_hypot(x1_x2,y1_y2).reshape((-1,))
#Change 0 to 180 so it can apply sum of boolean mask without losing values
sdi0=nd.where(condition=(sdi0==0),x=labels+180,y=sdi0)
#Store sum of distances co0 and histogram of directions in each range bin
for p in range (0,binnum-1):
booleanmask=nd.equal((ldis>=bins[p]),(ldis<bins[p+1]))
count0[p]+=nd.nansum(booleanmask)
co0[p]+=nd.nansum(ldis*booleanmask)
#Exclude values not in distance range bin
sdi1=nd.where(condition=(booleanmask==0),x=labels-1,y=sdi0)
for q in range (0,5):
booleanmaskdi=nd.equal((sdi1>=dibins[q]),(sdi1<dibins[q+1]))
codi0[q,p]+=nd.nansum(booleanmaskdi)
for k in range (0,5):
booleanmaskdi=nd.equal((sdi0>=dibins4[k]),(sdi0<dibins4[k+1]))
ldis0=ldis*booleanmaskdi
for l in range (0,binnum-1):
booleanmask=nd.equal((ldis0>=bins[l]),(ldis0<bins[l+1]))
count4[k,l]+=nd.nansum(booleanmask)
co4[k,l]+=nd.nansum(ldis0*booleanmask)
codi0[0,:]+=codi0[4,:]
codi0=codi0[0:4,:]
count4[0,:]+=count4[4,:]
count4=count4[0:4,:]
co4[0,:]+=co4[4,:]
co4=co4[0:4,:]
return(co0,codi0,count0,co4,count4)
#Full permutation distance computation
def distance1(regions,i,binnum,bins):
#Initiate empty array for storing the number of counted pairs in each distance range bin
count0=nd.zeros(binnum-1,gpu(0),dtype="float32")
#Calculate index coordinates and directions by chunks
a=regions[i[0]*broadcdp:min((i[0]+1)*broadcdp,regions.shape[0]),:]
b=regions[i[1]*broadcdp:min((i[1]+1)*broadcdp,regions.shape[0]),:]
a1=nd.array(a,gpu(0))
b1=nd.array(b,gpu(0))
a1_b1=(nd.expand_dims(a1,axis=1)-b1).reshape((-1,2))
x1_x2=a1_b1[:,0]
y1_y2=a1_b1[:,1]
ldis=nd.broadcast_hypot(x1_x2,y1_y2).reshape((-1,))
for p in range (0,binnum-1):
booleanmask=nd.equal((ldis>=bins[p]),(ldis<bins[p+1]))
count0[p]+=nd.nansum(booleanmask)
return(count0)
#Full permutation distance computation between different regions: high and low
def distance11(regions_high,regions_low,i,binnum,bins):
#Initiate empty array for storing the number of counted pairs in each distance range bin
count0=nd.zeros(binnum-1,gpu(0),dtype="float32")
#Calculate index coordinates and directions by chunks
a=regions_high[i[0]*broadcdp:min((i[0]+1)*broadcdp,regions_high.shape[0]),:]
b=regions_low[i[1]*broadcdp:min((i[1]+1)*broadcdp,regions_low.shape[0]),:]
a1=nd.array(a,gpu(0))
b1=nd.array(b,gpu(0))
a1_b1=(nd.expand_dims(a1,axis=1)-b1).reshape((-1,2))
x1_x2=a1_b1[:,0]
y1_y2=a1_b1[:,1]
ldis=nd.broadcast_hypot(x1_x2,y1_y2).reshape((-1,))
for p in range (0,binnum-1):
booleanmask=nd.equal((ldis>=bins[p]),(ldis<bins[p+1]))
count0[p]+=nd.nansum(booleanmask)
return(count0)
#Full combination distance computation
def distance2(regions,i,binnum,bins):
#Initiate empty array for storing the number of counted pairs in each distance range bin
count0=nd.zeros(binnum-1,gpu(0),dtype="float32")
seed=nd.zeros((1,2),gpu(0))
#Calculate index coordinates and directions by chunks
a=regions[i[0]*broadcdp:min((i[0]+1)*broadcdp,regions.shape[0]),:]
b=regions[i[1]*broadcdp:min((i[1]+1)*broadcdp,regions.shape[0]),:]
a1=nd.array(a,gpu(0))
b1=nd.array(b,gpu(0))
for ii in range (a1.shape[0]-1):
a1_b1=(nd.expand_dims(a1[ii].reshape((1,2)),axis=1)-b1[ii+1:,:]).reshape((a1[ii+1:,:].shape[0],2))
seed=nd.concat(seed,a1_b1,dim=0)
if seed.shape[0]>1:
x1_x2=seed[1:,0]
y1_y2=seed[1:,1]
ldis=nd.broadcast_hypot(x1_x2,y1_y2).reshape((-1,))
for p in range (0,binnum-1):
booleanmask=nd.equal((ldis>=bins[p]),(ldis<bins[p+1]))
count0[p]+=nd.nansum(booleanmask)
return(count0)
def omni(taoh_W,mean_d,cardh_his,taoh_W4,mean_d4,binnum):
#Compute OMNI
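# OMNIW integrates taoh_W over mean pair distance with the trapezoid rule, both
# omnidirectionally (OMNIW) and per direction class (OMNIW4)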
OMNIW=np.zeros(binnum,dtype="float32")
OMNIW4=np.zeros((4,binnum),dtype="float32")
#Convert Nan to zero to avoid issues
taoh_W1=np.nan_to_num(taoh_W)
mean_d1=np.nan_to_num(mean_d)
taoh_W41=np.nan_to_num(taoh_W4)
mean_d41=np.nan_to_num(mean_d4)
for j in range (binnum-1):
if taoh_W1[j+1]!=0:
OMNIW[0]+=(taoh_W1[j]+taoh_W1[j+1])*(mean_d1[j+1]-mean_d1[j])*0.5
for k in range (4):
for l in range (binnum-1):
if taoh_W41[k,l+1]!=0:
OMNIW4[k,0]+=(taoh_W41[k,l]+taoh_W41[k,l+1])*(mean_d41[k,l+1]-mean_d41[k,l])*0.5
results=np.vstack((taoh_W1,mean_d1,OMNIW,cardh_his))
results4=np.vstack((taoh_W41,mean_d41,OMNIW4))
return (results,results4)
def compu(flowpattern,bins,dibins,dibins4,binnum,gt):
#Initiate empty array for storing histogram for directions, distances, and number of counted pairs in each distance range bin
coAA=np.zeros((1,binnum-1),dtype="float32")
codiAA=np.zeros((4,binnum-1),dtype="float32")
countAA=np.zeros(binnum-1)
countAZ=np.zeros(binnum-1)
count4AA=np.zeros((4,binnum-1),dtype="float32")
co4AA=np.zeros((4,binnum-1),dtype="float32")
#Create coordinate arrays for each zone and compute distances and directions
#All the domain area excluding NoData
#Area of High
k=1
regionA=np.asarray(np.where(flowpattern>0),dtype="int32").T
if regionA.shape[0]!=0:
countA=itercontrol(regionA,k,bins,dibins,dibins4,binnum)
k=0
regionZ=np.asarray(np.where(flowpattern==0),dtype="int32").T
if regionZ.shape[0]!=0:
countAZ=itercontrol((regionA,regionZ),k,bins,dibins,dibins4,binnum)
#Each connected region in High
k=2#Switch
for i in range (1,np.int32(np.amax(flowpattern)+1)):
regionAA=np.asarray(np.where(flowpattern==i),dtype="int32").T
outAA=itercontrol(regionAA,k,bins,dibins,dibins4,binnum)
coAA+=outAA[0];codiAA+=outAA[1];countAA+=outAA[2];co4AA+=outAA[3];count4AA+=outAA[4]
#Compute connectivity metrics
if np.sum(countAZ)==0:
taoh_W= | np.append(1,(countAA/(countA+countAZ))) | numpy.append |
from __future__ import print_function
"""
Markov based methods for spatial dynamics.
"""
__author__ = "<NAME> <<EMAIL>"
__all__ = ["Markov", "LISA_Markov", "Spatial_Markov", "kullback",
"prais", "shorrock", "homogeneity"]
import numpy as np
from pysal.spatial_dynamics.ergodic import fmpt
from pysal.spatial_dynamics.ergodic import steady_state as STEADY_STATE
from scipy import stats
from operator import gt
import pysal
# TT predefine LISA transitions
# TT[i,j] is the transition type from i to j
# i = quadrant in period 0
# j = quadrant in period 1
# uses one offset so first row and col of TT are ignored
TT = np.zeros((5, 5), int)
c = 1
for i in range(1, 5):
for j in range(1, 5):
TT[i, j] = c
c += 1
# MOVE_TYPES is a dictionary that returns the move type of a LISA transition
# filtered on the significance of the LISA end points
# True indicates significant LISA in a particular period
# e.g. a key of (1, 3, True, False) indicates a significant LISA located in
# quadrant 1 in period 0 moved to quadrant 3 in period 1 but was not
# significant in quadrant 3.
MOVE_TYPES = {}
c = 1
cases = (True, False)
sig_keys = [(i, j) for i in cases for j in cases]
for i, sig_key in enumerate(sig_keys):
c = 1 + i * 16
for i in range(1, 5):
for j in range(1, 5):
key = (i, j, sig_key[0], sig_key[1])
MOVE_TYPES[key] = c
c += 1
class Markov(object):
"""
Classic Markov transition matrices.
Parameters
----------
class_ids : array
(n, t), one row per observation, one column recording the
state of each observation, with as many columns as time
periods.
classes : array
(k, 1), all different classes (bins) of the matrix.
Attributes
----------
p : matrix
(k, k), transition probability matrix.
steady_state : matrix
(k, 1), ergodic distribution.
transitions : matrix
(k, k), count of transitions between each state i and j.
Examples
--------
>>> c = [['b','a','c'],['c','c','a'],['c','b','c']]
>>> c.extend([['a','a','b'], ['a','b','c']])
>>> c = np.array(c)
>>> m = Markov(c)
>>> m.classes.tolist()
['a', 'b', 'c']
>>> m.p
matrix([[ 0.25 , 0.5 , 0.25 ],
[ 0.33333333, 0. , 0.66666667],
[ 0.33333333, 0.33333333, 0.33333333]])
>>> m.steady_state
matrix([[ 0.30769231],
[ 0.28846154],
[ 0.40384615]])
US nominal per capita income 48 states 81 years 1929-2009
>>> import pysal
>>> f = pysal.open(pysal.examples.get_path("usjoin.csv"))
>>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])
set classes to quintiles for each year
>>> q5 = np.array([pysal.Quantiles(y).yb for y in pci]).transpose()
>>> m = Markov(q5)
>>> m.transitions
array([[ 729., 71., 1., 0., 0.],
[ 72., 567., 80., 3., 0.],
[ 0., 81., 631., 86., 2.],
[ 0., 3., 86., 573., 56.],
[ 0., 0., 1., 57., 741.]])
>>> m.p
matrix([[ 0.91011236, 0.0886392 , 0.00124844, 0. , 0. ],
[ 0.09972299, 0.78531856, 0.11080332, 0.00415512, 0. ],
[ 0. , 0.10125 , 0.78875 , 0.1075 , 0.0025 ],
[ 0. , 0.00417827, 0.11977716, 0.79805014, 0.07799443],
[ 0. , 0. , 0.00125156, 0.07133917, 0.92740926]])
>>> m.steady_state
matrix([[ 0.20774716],
[ 0.18725774],
[ 0.20740537],
[ 0.18821787],
[ 0.20937187]])
Relative incomes
>>> pci = pci.transpose()
>>> rpci = pci/(pci.mean(axis=0))
>>> rq = pysal.Quantiles(rpci.flatten()).yb
>>> rq.shape = (48,81)
>>> mq = Markov(rq)
>>> mq.transitions
array([[ 707., 58., 7., 1., 0.],
[ 50., 629., 80., 1., 1.],
[ 4., 79., 610., 73., 2.],
[ 0., 7., 72., 650., 37.],
[ 0., 0., 0., 48., 724.]])
>>> mq.steady_state
matrix([[ 0.17957376],
[ 0.21631443],
[ 0.21499942],
[ 0.21134662],
[ 0.17776576]])
"""
def __init__(self, class_ids, classes=None):
if classes is not None:
self.classes = classes
else:
self.classes = np.unique(class_ids)
n, t = class_ids.shape
k = len(self.classes)
js = range(t - 1)
classIds = self.classes.tolist()
transitions = np.zeros((k, k))
for state_0 in js:
state_1 = state_0 + 1
state_0 = class_ids[:, state_0]
state_1 = class_ids[:, state_1]
initial = np.unique(state_0)
for i in initial:
ending = state_1[state_0 == i]
uending = np.unique(ending)
row = classIds.index(i)
for j in uending:
col = classIds.index(j)
transitions[row, col] += sum(ending == j)
self.transitions = transitions
row_sum = transitions.sum(axis=1)
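# row-normalize the transition counts; adding (row_sum == 0) avoids division by zero,
# so states with no observed outgoing transitions keep an all-zero row in p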
p = np.dot(np.diag(1 / (row_sum + (row_sum == 0))), transitions)
self.p = np.matrix(p)
@property
def steady_state(self):
if not hasattr(self, '_steady_state'):
self._steady_state = STEADY_STATE(self.p)
return self._steady_state
class Spatial_Markov(object):
"""
Markov transitions conditioned on the value of the spatial lag.
Parameters
----------
y : array
(n,t), one row per observation, one column per state of
each observation, with as many columns as time periods.
w : W
spatial weights object.
k : integer
number of classes (quantiles).
permutations : int, optional
number of permutations for use in randomization based
inference (the default is 0).
fixed : bool
If true, quantiles are taken over the entire n*t
pooled series. If false, quantiles are taken each
time period over n.
variable_name : string
name of variable.
Attributes
----------
p : matrix
(k, k), transition probability matrix for a-spatial
Markov.
s : matrix
(k, 1), ergodic distribution for a-spatial Markov.
transitions : matrix
(k, k), counts of transitions between each state i and j
for a-spatial Markov.
T : matrix
(k, k, k), counts of transitions for each conditional
Markov. T[0] is the matrix of transitions for
observations with lags in the 0th quantile; T[k-1] is the
transitions for the observations with lags in the k-1th.
P : matrix
(k, k, k), transition probability matrix for spatial
Markov first dimension is the conditioned on the lag.
S : matrix
(k, k), steady state distributions for spatial Markov.
Each row is a conditional steady_state.
F : matrix
(k, k, k),first mean passage times.
First dimension is conditioned on the lag.
shtest : list
(k elements), each element of the list is a tuple for a
multinomial difference test between the steady state
distribution from a conditional distribution versus the
overall steady state distribution: first element of the
tuple is the chi2 value, second its p-value and the third
the degrees of freedom.
chi2 : list
(k elements), each element of the list is a tuple for a
chi-squared test of the difference between the
conditional transition matrix against the overall
transition matrix: first element of the tuple is the chi2
value, second its p-value and the third the degrees of
freedom.
x2 : float
sum of the chi2 values for each of the conditional tests.
Has an asymptotic chi2 distribution with k(k-1)(k-1)
degrees of freedom. Under the null that transition
probabilities are spatially homogeneous.
(see chi2 above)
x2_dof : int
degrees of freedom for homogeneity test.
x2_pvalue : float
pvalue for homogeneity test based on analytic.
distribution
x2_rpvalue : float
(if permutations>0)
pseudo p-value for x2 based on random spatial
permutations of the rows of the original transitions.
x2_realizations : array
(permutations,1), the values of x2 for the random
permutations.
Q : float
Chi-square test of homogeneity across lag classes based
on Bickenbach and Bode (2003) [Bickenbach2003]_.
Q_p_value : float
p-value for Q.
LR : float
Likelihood ratio statistic for homogeneity across lag
classes based on Bickenback and Bode (2003)
[Bickenbach2003]_.
LR_p_value : float
p-value for LR.
dof_hom : int
degrees of freedom for LR and Q, corrected for 0 cells.
Notes
-----
Based on Rey (2001) [Rey2001]_.
The shtest and chi2 tests should be used with caution as they are based on
classic theory assuming random transitions. The x2 based test is
preferable since it simulates the randomness under the null. It is an
experimental test requiring further analysis.
Examples
--------
>>> import pysal as ps
>>> f = ps.open(ps.examples.get_path("usjoin.csv"))
>>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])
>>> pci = pci.transpose()
>>> rpci = pci/(pci.mean(axis=0))
>>> w = ps.open(ps.examples.get_path("states48.gal")).read()
>>> w.transform = 'r'
>>> sm = ps.Spatial_Markov(rpci, w, fixed=True, k=5, variable_name='rpci')
>>> for p in sm.P:
... print(p)
...
[[ 0.96341463 0.0304878 0.00609756 0. 0. ]
[ 0.06040268 0.83221477 0.10738255 0. 0. ]
[ 0. 0.14 0.74 0.12 0. ]
[ 0. 0.03571429 0.32142857 0.57142857 0.07142857]
[ 0. 0. 0. 0.16666667 0.83333333]]
[[ 0.79831933 0.16806723 0.03361345 0. 0. ]
[ 0.0754717 0.88207547 0.04245283 0. 0. ]
[ 0.00537634 0.06989247 0.8655914 0.05913978 0. ]
[ 0. 0. 0.06372549 0.90196078 0.03431373]
[ 0. 0. 0. 0.19444444 0.80555556]]
[[ 0.84693878 0.15306122 0. 0. 0. ]
[ 0.08133971 0.78947368 0.1291866 0. 0. ]
[ 0.00518135 0.0984456 0.79274611 0.0984456 0.00518135]
[ 0. 0. 0.09411765 0.87058824 0.03529412]
[ 0. 0. 0. 0.10204082 0.89795918]]
[[ 0.8852459 0.09836066 0. 0.01639344 0. ]
[ 0.03875969 0.81395349 0.13953488 0. 0.00775194]
[ 0.0049505 0.09405941 0.77722772 0.11881188 0.0049505 ]
[ 0. 0.02339181 0.12865497 0.75438596 0.09356725]
[ 0. 0. 0. 0.09661836 0.90338164]]
[[ 0.33333333 0.66666667 0. 0. 0. ]
[ 0.0483871 0.77419355 0.16129032 0.01612903 0. ]
[ 0.01149425 0.16091954 0.74712644 0.08045977 0. ]
[ 0. 0.01036269 0.06217617 0.89637306 0.03108808]
[ 0. 0. 0. 0.02352941 0.97647059]]
The probability of a poor state remaining poor is 0.963 if their
neighbors are in the 1st quintile and 0.798 if their neighbors are
in the 2nd quintile. The probability of a rich economy remaining
rich is 0.976 if their neighbors are in the 5th quintile, but if their
neighbors are in the 4th quintile this drops to 0.903.
The Q and likelihood ratio statistics are both significant indicating
the dynamics are not homogeneous across the lag classes:
>>> "%.3f"%sm.LR
'170.659'
>>> "%.3f"%sm.Q
'200.624'
>>> "%.3f"%sm.LR_p_value
'0.000'
>>> "%.3f"%sm.Q_p_value
'0.000'
>>> sm.dof_hom
60
The long run distribution for states with poor (rich) neighbors has
0.435 (0.018) of the values in the first quintile, 0.263 (0.200) in
the second quintile, 0.204 (0.190) in the third, 0.0684 (0.255) in the
fourth and 0.029 (0.337) in the fifth quintile.
>>> sm.S
array([[ 0.43509425, 0.2635327 , 0.20363044, 0.06841983, 0.02932278],
[ 0.13391287, 0.33993305, 0.25153036, 0.23343016, 0.04119356],
[ 0.12124869, 0.21137444, 0.2635101 , 0.29013417, 0.1137326 ],
[ 0.0776413 , 0.19748806, 0.25352636, 0.22480415, 0.24654013],
[ 0.01776781, 0.19964349, 0.19009833, 0.25524697, 0.3372434 ]])
States with incomes in the first quintile with neighbors in the
first quintile return to the first quartile after 2.298 years, after
leaving the first quintile. They enter the fourth quintile after
80.810 years after leaving the first quintile, on average.
Poor states within neighbors in the fourth quintile return to the
first quintile, on average, after 12.88 years, and would enter the
fourth quintile after 28.473 years.
>>> for f in sm.F:
... print(f)
...
[[ 2.29835259 28.95614035 46.14285714 80.80952381 279.42857143]
[ 33.86549708 3.79459555 22.57142857 57.23809524 255.85714286]
[ 43.60233918 9.73684211 4.91085714 34.66666667 233.28571429]
[ 46.62865497 12.76315789 6.25714286 14.61564626 198.61904762]
[ 52.62865497 18.76315789 12.25714286 6. 34.1031746 ]]
[[ 7.46754205 9.70574606 25.76785714 74.53116883 194.23446197]
[ 27.76691978 2.94175577 24.97142857 73.73474026 193.4380334 ]
[ 53.57477715 28.48447637 3.97566318 48.76331169 168.46660482]
[ 72.03631562 46.94601483 18.46153846 4.28393653 119.70329314]
[ 77.17917276 52.08887197 23.6043956 5.14285714 24.27564033]]
[[ 8.24751154 6.53333333 18.38765432 40.70864198 112.76732026]
[ 47.35040872 4.73094099 11.85432099 34.17530864 106.23398693]
[ 69.42288828 24.76666667 3.794921 22.32098765 94.37966594]
[ 83.72288828 39.06666667 14.3 3.44668119 76.36702977]
[ 93.52288828 48.86666667 24.1 9.8 8.79255406]]
[[ 12.87974382 13.34847151 19.83446328 28.47257282 55.82395142]
[ 99.46114206 5.06359731 10.54545198 23.05133495 49.68944423]
[ 117.76777159 23.03735526 3.94436301 15.0843986 43.57927247]
[ 127.89752089 32.4393006 14.56853107 4.44831643 31.63099455]
[ 138.24752089 42.7893006 24.91853107 10.35 4.05613474]]
[[ 56.2815534 1.5 10.57236842 27.02173913 110.54347826]
[ 82.9223301 5.00892857 9.07236842 25.52173913 109.04347826]
[ 97.17718447 19.53125 5.26043557 21.42391304 104.94565217]
[ 127.1407767 48.74107143 33.29605263 3.91777427 83.52173913]
[ 169.6407767 91.24107143 75.79605263 42.5 2.96521739]]
"""
def __init__(self, y, w, k=4, permutations=0, fixed=False,
variable_name=None):
self.y = y
rows, cols = y.shape
self.k = k
self.cols = cols
npa = np.array
self.fixed = fixed
self.variable_name = variable_name
if fixed:
yf = y.flatten()
yb = pysal.Quantiles(yf, k=k).yb
yb.shape = (rows, cols)
classes = yb
else:
classes = npa([pysal.Quantiles(y[:, i], k=k)
.yb for i in np.arange(cols)]).transpose()
classic = Markov(classes)
self.classes = classes
self.p = classic.p
self.transitions = classic.transitions
T, P = self._calc(y, w, classes, k=k)
self.T = T
self.P = P
if permutations:
nrp = np.random.permutation
counter = 0
x2_realizations = np.zeros((permutations, 1))
for perm in range(permutations):
T, P = self._calc(nrp(y), w, classes, k=k)
x2 = [chi2(T[i], self.transitions)[0] for i in range(k)]
x2s = sum(x2)
x2_realizations[perm] = x2s
if x2s >= self.x2:
counter += 1
self.x2_rpvalue = (counter + 1.0) / (permutations + 1.)
self.x2_realizations = x2_realizations
@property
def s(self):
if not hasattr(self, '_s'):
self._s = STEADY_STATE(self.p)
return self._s
@property
def S(self):
if not hasattr(self, '_S'):
S = np.zeros_like(self.p)
for i, p in enumerate(self.P):
S[i] = STEADY_STATE(p)
self._S = np.asarray(S)
return self._S
@property
def F(self):
if not hasattr(self, '_F'):
F = np.zeros_like(self.P)
for i, p in enumerate(self.P):
F[i] = fmpt(np.asmatrix(p))
self._F = np.asarray(F)
return self._F
# bickenbach and bode tests
@property
def ht(self):
if not hasattr(self, '_ht'):
self._ht = homogeneity(self.T)
return self._ht
@property
def Q(self):
if not hasattr(self, '_Q'):
self._Q = self.ht.Q
return self._Q
@property
def Q_p_value(self):
self._Q_p_value = self.ht.Q_p_value
return self._Q_p_value
@property
def LR(self):
self._LR = self.ht.LR
return self._LR
@property
def LR_p_value(self):
self._LR_p_value = self.ht.LR_p_value
return self._LR_p_value
@property
def dof_hom(self):
self._dof_hom = self.ht.dof
return self._dof_hom
# shtests
@property
def shtest(self):
if not hasattr(self, '_shtest'):
self._shtest = self._mn_test()
return self._shtest
@property
def chi2(self):
if not hasattr(self, '_chi2'):
self._chi2 = self._chi2_test()
return self._chi2
@property
def x2(self):
if not hasattr(self, '_x2'):
self._x2 = sum([c[0] for c in self.chi2])
return self._x2
@property
def x2_pvalue(self):
if not hasattr(self, '_x2_pvalue'):
self._x2_pvalue = 1 - stats.chi2.cdf(self.x2, self.x2_dof)
return self._x2_pvalue
@property
def x2_dof(self):
if not hasattr(self, '_x2_dof'):
k = self.k
self._x2_dof = k * (k - 1) * (k - 1)
return self._x2_dof
def _calc(self, y, w, classes, k):
ly = pysal.lag_spatial(w, y)
npa = np.array
if self.fixed:
l_classes = pysal.Quantiles(ly.flatten(), k=k).yb
l_classes.shape = ly.shape
else:
l_classes = npa([pysal.Quantiles(
ly[:, i], k=k).yb for i in np.arange(self.cols)])
l_classes = l_classes.transpose()
T = np.zeros((k, k, k))
n, t = y.shape
for t1 in range(t - 1):
t2 = t1 + 1
for i in range(n):
T[l_classes[i, t1], classes[i, t1], classes[i, t2]] += 1
P = np.zeros_like(T)
for i, mat in enumerate(T):
row_sum = mat.sum(axis=1)
row_sum = row_sum + (row_sum == 0)
p_i = np.matrix(np.diag(1. / row_sum) * np.matrix(mat))
P[i] = p_i
return T, P
def _mn_test(self):
"""
helper to calculate tests of differences between steady state
distributions from the conditional and overall distributions.
"""
n, t = self.y.shape
n0, n1, n2 = self.T.shape
rn = range(n0)
mat = [self._ssmnp_test(
self.s, self.S[i], self.T[i].sum()) for i in rn]
return mat
def _ssmnp_test(self, p1, p2, nt):
"""
Steady state multinomial probability difference test.
Arguments
---------
p1 : array
(k, 1), first steady state probability distribution.
p2 : array
(k, 1), second steady state probability distribution.
nt : int
number of transitions to base the test on.
Returns
-------
tuple
(3 elements)
(chi2 value, pvalue, degrees of freedom)
"""
p1 = np.array(p1)
k, c = p1.shape
p1.shape = (k, )
o = nt * p2
e = nt * p1
d = np.multiply((o - e), (o - e))
d = d / e
chi2 = d.sum()
pvalue = 1 - stats.chi2.cdf(chi2, k - 1)
return (chi2, pvalue, k - 1)
def _chi2_test(self):
"""
helper to calculate tests of differences between the conditional
transition matrices and the overall transitions matrix.
"""
n, t = self.y.shape
n0, n1, n2 = self.T.shape
rn = range(n0)
mat = [chi2(self.T[i], self.transitions) for i in rn]
return mat
def summary(self, file_name=None):
class_names = ["C%d" % i for i in range(self.k)]
regime_names = ["LAG%d" % i for i in range(self.k)]
ht = homogeneity(self.T, class_names=class_names,
regime_names=regime_names)
title = "Spatial Markov Test"
if self.variable_name:
title = title + ": " + self.variable_name
if file_name:
ht.summary(file_name=file_name, title=title)
else:
ht.summary(title=title)
def chi2(T1, T2):
"""
chi-squared test of difference between two transition matrices.
Parameters
----------
T1 : matrix
(k, k), matrix of transitions (counts).
T2 : matrix
(k, k), matrix of transitions (counts) to use to form the
probabilities under the null.
Returns
-------
: tuple
(3 elements).
(chi2 value, pvalue, degrees of freedom).
Examples
--------
>>> import pysal
>>> f = pysal.open(pysal.examples.get_path("usjoin.csv"))
>>> years = range(1929, 2010)
>>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
>>> rpci = pci/(pci.mean(axis=0))
>>> w = pysal.open(pysal.examples.get_path("states48.gal")).read()
>>> w.transform='r'
>>> sm = Spatial_Markov(rpci, w, fixed=True)
>>> T1 = sm.T[0]
>>> T1
array([[ 562., 22., 1., 0.],
[ 12., 201., 22., 0.],
[ 0., 17., 97., 4.],
[ 0., 0., 3., 19.]])
>>> T2 = sm.transitions
>>> T2
array([[ 884., 77., 4., 0.],
[ 68., 794., 87., 3.],
[ 1., 92., 815., 51.],
[ 1., 0., 60., 903.]])
>>> chi2(T1,T2)
(23.397284414732951, 0.0053631167048613371, 9)
Notes
-----
Second matrix is used to form the probabilities under the null.
Marginal sums from first matrix are distributed across these probabilities
under the null. In other words the observed transitions are taken from T1
while the expected transitions are formed as follows
.. math::
E_{i,j} = \left(\sum_k T1_{i,k}\right) \frac{T2_{i,j}}{\sum_k T2_{i,k}}
Degrees of freedom corrected for any rows in either T1 or T2 that have
zero total transitions.
"""
rs2 = T2.sum(axis=1)
rs1 = T1.sum(axis=1)
rs2nz = rs2 > 0
rs1nz = rs1 > 0
dof1 = sum(rs1nz)
dof2 = sum(rs2nz)
rs2 = rs2 + (rs2 == 0)
dof = (dof1 - 1) * (dof2 - 1)
p = np.diag(1 / rs2) * np.matrix(T2)
E = np.diag(rs1) * | np.matrix(p) | numpy.matrix |
############################################################################################
### Functions to analyze and plot weight and activity distributions from simulation data ###
############################################################################################
### Copyright 2019-2021 <NAME>
### licensed under Apache-2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from utilityFunctions import *
import sys
import warnings
import os.path
import numpy as np
from pathlib import Path
from subprocess import call
# findOverallMinMax
# Determines the minimum and maximum values across all data files that are located somewhere in a given directory
# and that have the same readout time
# nppath: path to the directory to read the data from
# Nl_exc: the number of excitatory neurons in one line of a quadratic grid
# time_for_readout: the time at which the weights shall be read out (as a string)
# h_0: the initial weight, and normalization factor for z
# return: two-dimensional array containing the minimum and maximum values for the four different data types
def findOverallMinMax(nppath, Nl_exc, time_for_readout, h_0):
sysmin, sysmax = sys.float_info.min, sys.float_info.max
(h_min, z_min, w_min, v_min) = (sysmax, sysmax, sysmax, sysmax) # initially, assign the maximum possible value
(h_max, z_max, w_max, v_max) = (sysmin, sysmin, sysmin, sysmin) # initially, assign the minimum possible value
# recurseFind
# Function to recursively move through directories and look for data to find their minima/maxima
# path: the directory to iterate through
def recurseFindMinMax(path):
nonlocal h_min, z_min, w_min, v_min
nonlocal h_max, z_max, w_max, v_max
rawpaths = Path(path)
for x in rawpaths.iterdir():
if x.is_dir():
recurseFindMinMax(x) # if the found file is a directory, recurse into it
tmppath = str(x)
if ("_net_" + time_for_readout + ".txt") in tmppath: # file containing network simulation data found
# read data from file
try:
connections, h, z, v = readWeightMatrixData(tmppath, Nl_exc)
h = h[connections] # reduce h (leave out non-existent synapses)
z = h_0*z[connections] # reduce and normalize z
w = h + z # compute total synaptic weight
except ValueError:
raise
except OSError:
raise
# checkAndAdjust
# Compares two numbers and returns the larger/lower one, depending on the operator
# a: a floating point number
# b: a floating point number
# op [optional]: the operator to be used
# return: the larger/lower one of the two numbers
def checkAndAdjust(a, b, op=">"):
if b > a:
return b if op == ">" else a
else:
return a if op == ">" else b
# adjust maxima
h_max = checkAndAdjust(h_max, np.max(h), ">")
z_max = checkAndAdjust(z_max, np.max(z), ">")
w_max = checkAndAdjust(w_max, np.max(w), ">")
v_max = checkAndAdjust(v_max, np.max(v), ">")
# adjust minima
h_min = checkAndAdjust(h_min, np.min(h), "<")
z_min = checkAndAdjust(z_min, np.min(z), "<")
w_min = checkAndAdjust(w_min, np.min(w), "<")
v_min = checkAndAdjust(v_min, np.min(v), "<")
# iterate across files in the directory
recurseFindMinMax(nppath)
return np.array([[h_min, h_max], [z_min, z_max], [w_min, w_max], [v_min, v_max]])
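# Minimal usage sketch for findOverallMinMax (the directory name, grid size and readout
# time below are hypothetical placeholders; h_0 matches the default used further down).
if __name__ == "__main__" and os.path.isdir("example_data"):
    minmax = findOverallMinMax("example_data", Nl_exc=40, time_for_readout="28810.0", h_0=0.420075)
    (h_min, h_max), (z_min, z_max), (w_min, w_max), (v_min, v_max) = minmax
    print("h range: [" + str(h_min) + ", " + str(h_max) + "]")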
# plotDistributions
# Creates data and plot files of the weight and activity distribution at a given time
# nppath: path to the directory to read the data from
# timestamp: a string containing date and time (to access correct paths) OR equal to "any"
# add: additional descriptor
# Nl_exc: the number of excitatory neurons in one line of a quadratic grid
# time_for_readout: the time at which the weights shall be read out (as a string)
# core: array of indices of the cell assembly (core) neurons
# h_0 [optional]: the initial weight, and normalization factor for z
# norm_all [optional]: specifies whether to normalize across all subpopulations (True) or across each subpop. individually (False)
# - the first is recommendable if samples of different subpopulations are compared against each other,
# the latter is recommendable if different samples of the same subpopulation are compared
# bins [optional]: list of four arrays, each containing the bins for one of the four quantities
def plotDistributions(nppath, timestamp, add, Nl_exc, time_for_readout, core, h_0=0.420075, norm_all=False, bins=None):
orgdir = os.getcwd() # store the current working directory
# "any" case: not looking for a specific timestamp, but for any data with a certain time_for_readout in the given directory
if timestamp == "any":
if bins is None:
warnings.warn("Warning: timestamp=\"any\": bins should be provided by the calling function to compare across trials.")
rawpaths = Path(nppath)
for x in rawpaths.iterdir():
tmppath = os.path.split(str(x))[1] # remove head from path
if ("_net_" + time_for_readout + ".txt") in tmppath:
timestamp = tmppath.split("_net_")[0]
plotDistributions(nppath, timestamp, add, Nl_exc, time_for_readout, core, h_0, norm_all, bins) # call this function again, now with specific timestamp
return
# read data from file [timestamp]_net_[time_for_readout].txt
os.chdir(nppath) # change to data directory
try:
connections, h, z, v = readWeightMatrixData(timestamp + "_net_" + time_for_readout + ".txt", Nl_exc)
z = h_0*z # normalize z
w = h + z # compute total synaptic weight
except ValueError:
raise
except OSError:
raise
# determine subpopulations
N_tot = Nl_exc**2 # total number of neurons
N_CA = len(core) # number of neurons in the cell assembly
N_control = N_tot - N_CA # number of neurons in the control subpopulation
all = np.arange(N_tot)
noncore = all[np.logical_not(np.in1d(all, core))] # array of indices of the neurons not in the cell assembly (core)
block_CA_within = np.ones((N_CA, N_CA), dtype=bool)
import csv
import os
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def smooth(inp):
assert len(inp.shape) == 2
output = np.empty(inp.shape)
for i in range(inp.shape[0]):
output[i] = np.mean(inp[max(i-3,0):i+1,:], axis=0)
return output
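# Tiny illustration of smooth(): each output row is the mean of up to the four
# most recent input rows (a trailing window), e.g. output row 4 averages rows 1-4.
_demo = np.array([[0.], [1.], [2.], [3.], [4.]])
_demo_smoothed = smooth(_demo)   # -> [[0.], [0.5], [1.], [1.5], [2.5]]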
def extract_one_file(filename, starting_key, iter_key):
if not os.path.isfile(filename):
raise IOError("file '" + str(filename) + "' does not exist")
output = []
wallclock = [0.]
divergence = []
with open(filename, 'r') as f:
reader = csv.reader(f, delimiter=' ')
while True:
try:
line = next(reader)
if line[0]=='train' and line[1] == 'disc':
divergence[-1].append(-1 * float(line[3]))
if line[0]==iter_key:
wallclock[-1] += float(line[5])
if line[0]==starting_key:
output.append([])
if len(divergence) > 0:
divergence[-1] = np.mean(np.array(divergence[-1]))
divergence.append([])
wallclock.append(wallclock[-1])
for i in range(len(line)-1):
try:
output[-1].append(float(line[i+1]))
except ValueError:
print('strange line = ', line[i+1], type(line[i+1]))
except csv.Error:
pass
except StopIteration:
break
return smooth(np.array(output)), wallclock[0:len(output)], divergence[0:-1]
def wall(js, wallclock, divergence, js_index, mode):
if mode == 'walltime':
x = wallclock
y = js[:,js_index-1]
elif mode == 'iterations':
x = np.arange(len(js[:,js_index-1]))
y = js[:,js_index-1]
elif mode == 'divergence':
x = np.arange(len(divergence))
y = divergence
else:
raise ValueError('unknow mode = ', mode)
return x,y
def walltime_interpolate(all_x, all_y, mode):
if mode=='iterations':
out_x = all_x[0,:]
for i in range(all_x.shape[0]):
assert np.max(np.abs(out_x - all_x[i,:]))==0
return out_x, all_y
shortest_index = np.argmin(all_x[:,all_x.shape[1]-1])
shortest_time = all_x[shortest_index, all_x.shape[1]-1]
out_x = np.linspace(0, shortest_time, int(shortest_time / 20))
out_y = np.empty((all_y.shape[0], len(out_x)), dtype=np.float32)
for i in range(out_y.shape[0]):
out_y[i,:] = np.interp(out_x, all_x[i,:], all_y[i,:])
return out_x / 3600., out_y
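# Short illustration of walltime_interpolate in 'walltime' mode: two runs with
# different wall-clock grids are resampled onto a common grid spanning the
# shorter run, and the returned x axis is converted from seconds to hours.
_demo_x = np.array([[0., 100., 200.], [0., 150., 300.]])
_demo_y = np.array([[0., 1., 2.], [0., 3., 6.]])
_x_hours, _y_common = walltime_interpolate(_demo_x, _demo_y, 'walltime')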
def plot(js_index, mode, xlim=None, ylim=None):
legends = []
colors = ['b','r','g','k','m','c','y']
key = 'JS='
iter_key = 'iteration'
'''
js, wallclock = extract_one_file('log_ttur_1.txt', key, iter_key)
x = wall(wallclock, walltime)
l, = plt.plot(x, js[:,js_index-1], linewidth=0.5, label='ttur', color=colors[0])
legends.append(l)
js, wallclock = extract_one_file('log_fogan_3.txt', key, iter_key)
x = wall(wallclock, walltime)
l, = plt.plot(x, js[:,js_index-1], linewidth=0.5, label='fogan_1', color=colors[1])
legends.append(l)
js, wallclock = extract_one_file('log_fogan_2.txt', key, iter_key)
x = wall(wallclock, walltime)
l, = plt.plot(x, js[:,js_index-1], linewidth=0.5, label='fogan_2', color=colors[2])
legends.append(l)
'''
#js, wallclock, divergence = extract_one_file('log_ttur_batch_norm_1.txt', key, iter_key)
#x,y = wall(js, wallclock, divergence, js_index, mode)
#l, = plt.plot(x, y, linewidth=0.5, label='ttur_bn', color=colors[3])
js, wallclock, divergence = extract_one_file('log_ttur_batch_norm_3.txt', key, iter_key)
x,y = wall(js, wallclock, divergence, js_index, mode)
all_x = np.empty((5, len(x)), dtype=np.float32)
all_y = np.empty((5, len(y)), dtype=np.float32)
all_y[0,:] = y
all_x[0,:] = x
js, wallclock, divergence = extract_one_file('log_ttur_batch_norm_4.txt', key, iter_key)
x,y = wall(js, wallclock, divergence, js_index, mode)
all_y[1,:] = y
all_x[1,:] = x
js, wallclock, divergence = extract_one_file('log_ttur_batch_norm_5.txt', key, iter_key)
x,y = wall(js, wallclock, divergence, js_index, mode)
all_y[2,:] = y
all_x[2,:] = x
js, wallclock, divergence = extract_one_file('log_ttur_batch_norm_6.txt', key, iter_key)
x,y = wall(js, wallclock, divergence, js_index, mode)
all_y[3,:] = y
all_x[3,:] = x
js, wallclock, divergence = extract_one_file('log_ttur_batch_norm_7.txt', key, iter_key)
x,y = wall(js, wallclock, divergence, js_index, mode)
all_y[4,:] = y
all_x[4,:] = x
x, all_y = walltime_interpolate(all_x, all_y, mode)
for i in range(all_x.shape[0]):
l, = plt.plot(x, all_y[i,:], linewidth=0.25, label='ttur_bn', color=colors[1])
y_mean = np.mean(all_y, axis=0)
import numpy as np
import cv2
from collections import deque
import pickle
import os
class ImageProcessor:
"""
Class used to process an image for the LaneDetector. Applies both color and gradient thresholding and produces a set of
images (undistorted, thresholded and warped) that can be used for debugging.
"""
def __init__(self, calibration_data_file):
# Camera calibration data
calibration_data = self._load_calibration_data(file_path = calibration_data_file)
self.mtx = calibration_data['mtx']
self.dist = calibration_data['dist']
# Gradient and color thresholding parameters
self.sobel_kernel = 5
self.grad_x_thresh = (15, 255) # Sobel x threshold
self.grad_y_thresh = (25, 255) # Sobel y threshold
self.grad_mag_thresh = (40, 255) # Sobel mag threshold
self.grad_dir_thresh = (0.7, 1.3) # Sobel direction range
self.grad_v_thresh = (180, 255) # HSV, V channel threshold to filter gradient
self.r_thresh = (195, 255) # RGB, Red channel threshold
self.s_thresh = (100, 255) # HSL, S channel threshold
self.l_thresh = (195, 255) # HSL, L channel threshold
self.b_thresh = (150, 255) # LAB, B channel threshold
self.v_thresh = (140, 255) # HSV, V channel threshold
# Perspective transformation parameters
# slope = (y2 - y1) / (x2 - x1)
# intercept = y1 - slope * x1
# top left, top right = (570, 470), (722, 470)
# bottom left, bottom right = (220, 720), (1110, 720)
self.persp_src_left_line = (-0.7142857143, 877.142857146) # Slope and intercept for left line
self.persp_src_right_line = (0.6443298969, 4.793814441) # Slope and intercept for right line
self.persp_src_top_pct = 0.645 # Percentage from the top
self.persp_src_bottom_pct = 0.02 # Percentage from bottom
self.persp_dst_x_pct = 0.22 # Destination offset percent
self.persp_src = None
self.persp_dst = None
def _load_calibration_data(self, file_path = os.path.join('camera_cal', 'calibration.p')):
with open(file_path, 'rb') as f:
return pickle.load(f)
def _warp_coordinates(self, img):
if self.persp_src is None or self.persp_dst is None:
cols = img.shape[1]
rows = img.shape[0]
src_top_offset = rows * self.persp_src_top_pct
src_bottom_offset = rows * self.persp_src_bottom_pct
left_slope, left_intercept = self.persp_src_left_line
right_slope, right_intercept = self.persp_src_right_line
top_left = [(src_top_offset - left_intercept) / left_slope, src_top_offset]
top_right = [(src_top_offset - right_intercept) / right_slope, src_top_offset]
bottom_left = [(rows - src_bottom_offset - left_intercept) / left_slope, rows - src_bottom_offset]
bottom_right = [(rows - src_bottom_offset - right_intercept) / right_slope, rows - src_bottom_offset]
#Top left, Top right, Bottom right, Bottom left
src = np.float32([top_left, top_right, bottom_right, bottom_left])
dst_x_offset = cols * self.persp_dst_x_pct
top_left = [dst_x_offset, 0]
top_right = [cols - dst_x_offset, 0]
bottom_left = [dst_x_offset, rows]
bottom_right = [cols - dst_x_offset, rows]
dst = np.float32([top_left, top_right, bottom_right, bottom_left])
self.persp_src = src
self.persp_dst = dst
return self.persp_src, self.persp_dst
def _sobel(self, img, orient = 'x', sobel_kernel = 3):
# Take the derivative in x or y given orient = 'x' or 'y'
if orient == 'x':
sobel = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize = sobel_kernel)
else:
sobel = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize = sobel_kernel)
return sobel
def _apply_thresh(self, img, thresh = [0, 255]):
result = np.zeros_like(img)
result[(img >= thresh[0]) & (img <= thresh[1])] = 1
return result
def unwarp_image(self, img):
img_shape = img.shape[1::-1]
src, dst = self._warp_coordinates(img)
warp_m = cv2.getPerspectiveTransform(dst, src)
unwarped = cv2.warpPerspective(img, warp_m, img_shape)
return unwarped
def warp_image(self, img):
img_shape = img.shape[1::-1]
src, dst = self._warp_coordinates(img)
warp_m = cv2.getPerspectiveTransform(src, dst)
warped = cv2.warpPerspective(img, warp_m, img_shape)
return warped
def undistort_image(self, img):
return cv2.undistort(img, self.mtx, self.dist, None, self.mtx)
def sobel_abs_thresh(self, sobel, thresh=[0,255]):
# Take the absolute value of the derivative or gradient
abs_sobel = np.absolute(sobel)
# Scale to 8-bit (0 - 255) then convert to type = np.uint8
scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))
binary_output = self._apply_thresh(scaled_sobel, thresh)
return binary_output
def sobel_mag_thresh(self, sobel_x, sobel_y, thresh=(0, 255)):
# Calculate the gradient magnitude
gradmag = np.sqrt(sobel_x**2 + sobel_y**2)
# Rescale to 8 bit
scale_factor = np.max(gradmag)/255
gradmag = (gradmag/scale_factor).astype(np.uint8)
binary_output = self._apply_thresh(gradmag, thresh)
return binary_output
def sobel_dir_thresh(self, sobel_x, sobel_y, thresh=(0, np.pi/2)):
# Take the absolute value of the x and y gradients
abs_sobel_x = np.absolute(sobel_x)
abs_sobel_y = np.absolute(sobel_y)
# Calculate the direction of the gradient
abs_grad_dir = np.arctan2(abs_sobel_y, abs_sobel_x)
binary_output = self._apply_thresh(abs_grad_dir, thresh)
return binary_output
def gradient_thresh(self, img):
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
v_ch = hsv_img[:,:,2]
v_binary = self._apply_thresh(v_ch, self.grad_v_thresh)
sobel_x = self._sobel(gray_img, sobel_kernel = self.sobel_kernel, orient = 'x')
sobel_y = self._sobel(gray_img, sobel_kernel = self.sobel_kernel, orient = 'y')
sobel_x_binary = self.sobel_abs_thresh(sobel_x, thresh = self.grad_x_thresh)
sobel_y_binary = self.sobel_abs_thresh(sobel_y, thresh = self.grad_y_thresh)
sobel_mag_binary = self.sobel_mag_thresh(sobel_x, sobel_y, thresh = self.grad_mag_thresh)
sobel_dir_binary = self.sobel_dir_thresh(sobel_x, sobel_y, thresh = self.grad_dir_thresh)
sobel_binary = np.zeros_like(sobel_x_binary)
sobel_binary[(((sobel_x_binary == 1) & (sobel_y_binary == 1)) | (sobel_dir_binary == 1)) & (sobel_mag_binary == 1) & (v_binary == 1)] = 1
return sobel_binary
def color_thresh(self, img):
hls_img = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lab_img = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
r_ch = img[:,:,2]
r_binary = self._apply_thresh(r_ch, self.r_thresh)
l_ch = hls_img[:,:,1]
l_binary = self._apply_thresh(l_ch, self.l_thresh)
s_ch = hls_img[:,:,2]
s_binary = self._apply_thresh(s_ch, self.s_thresh)
b_ch = lab_img[:,:,2]
b_binary = self._apply_thresh(b_ch, self.b_thresh)
v_ch = hsv_img[:,:,2]
v_binary = self._apply_thresh(v_ch, self.v_thresh)
result = np.zeros_like(s_binary)
# B and V for yellow, R and L for white, S and V for both
result[((b_binary == 1) & (v_binary == 1)) | ((r_binary == 1) & (l_binary == 1)) | ((s_binary == 1) & (v_binary == 1))] = 1
return result
def threshold_image(self, img):
gradient_binary = self.gradient_thresh(img)
color_binary = self.color_thresh(img)
result = np.zeros_like(gradient_binary)
result[(gradient_binary == 1) | (color_binary) == 1] = 255
return result
def process_image(self, img):
"""
Processes the given image: applies undistortion using the camera calibration data, thresholds the result and then
warps it to a bird's-eye view of the road.
"""
undistorted_img = self.undistort_image(img)
thresholded_img = self.threshold_image(undistorted_img)
warped_img = self.warp_image(thresholded_img)
return undistorted_img, thresholded_img, warped_img
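# Minimal usage sketch for the ImageProcessor pipeline (the test image path is a
# hypothetical placeholder; the calibration pickle is the default expected above).
if __name__ == '__main__' and os.path.isfile(os.path.join('camera_cal', 'calibration.p')):
    _processor = ImageProcessor(os.path.join('camera_cal', 'calibration.p'))
    _frame = cv2.imread('test_images/test1.jpg')  # hypothetical test frame
    if _frame is not None:
        _undistorted, _thresholded, _warped = _processor.process_image(_frame)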
class LaneDetector:
"""
The class is used to detect road lanes in processed frames (from the ImageProcessor), using a sliding window
through convolutions to detect hot pixels. For each slice it extracts the centroids found in the windows and
fits a polynomial to compute the curvature and deviation from center. The same polynomial can be used to draw
the lines in the frame. The final centroids returned by the pipeline are averaged over the last smooth_frames
frames to smooth the result.
"""
FAIL_CODES = {
1: 'Lane distance out of range',
2: 'Lane distance deviates from mean',
3: 'Lane distance deviates from previous frame',
4: 'Low left lane confidence',
5: 'Low right lane confidence',
9: 'Low lanes confidence'
}
def __init__(self, window_width = 30, window_height = 80, margin = 35, smooth_frames = 15, xm = 3.7/700, ym = 3/110):
"""
Initializes the class with the given parameters for the windows. Note that if smooth_frames is zero no interpolation is
performed between frames.
Parameters
window_width: The width of the sliding window
window_height: The height of the sliding window
margin: Left/right margin that is used by the sliding window in subsequent layers
smooth_frames: The number of frames to use for smoothing the result of the detection
xm: The number of meters per pixel on the horizontal axis
ym: The number of meters per pixel on the vertical axis
"""
# [(left, right, y)]
self.centroids_buffer = deque(maxlen = smooth_frames)
self.last_lanes_distance = None
self.window_width = window_width
self.window_height = window_height
self.margin = margin
self.first_window_height = .75 # The height for the first window (for the start of the lane at the bottom)
self.min_points_fit = 4 # Number of points already found before trying to fit a line when no center is detected
self.min_confidence = 0.16 # Min confidence to keep a detected lane
self.dist_thresh = (510, 890) # Lanes distance threshold
self.max_dist_diff = 60 # Max lanes distance difference between frames
self.max_dist_mean_dev = 80 # Max lanes distance deviation from mean
self.xm = xm
self.ym = ym
self.min_conv_signal = 1000 # Min conv signal to avoid noise
self.max_window_signal = None # Cache for the max amount of signal in a window to compute confidence
def compute_window_max_signal(self, window, width, height, max_value = 255):
"""
Returns the maximum amount of signal in a window with the given dimension, given the value for each pixel
"""
window_sum = np.sum(np.ones((height, width)) * max_value, axis = 0)
conv_signal = np.convolve(window, window_sum)
return np.max(conv_signal)
def detect_lanes(self, img):
"""
Detection pipeline: Starts out with detecting the bottom lanes using a bigger window for the convolution. The
centroids found at this stage are used as base for the next layer (look around the margin). For each layer estimates
the correctness of the detected centroids and tries to detect failures based on the confidence (given by the amount of
signal in each window) and the distance between lanes (and the mean of the previous lanes if smoothing is enabled).
Parameters
img: The input image, must be a processed image from the ImageProcessor
Returns
lanes_centroids: The centroids for the detected lanes
(left_fit, right_fit): The left and right polynomial coefficients from the lanes_centroids
(left_curvature, right_curvature): The curvature in meters
deviation: The deviation from the center of the lane
fail_code: 0 if the lanes could be detected from this frame, otherwise a code that can be mapped in the FAIL_CODES dictionary
Note that if the detection was not successful, the lanes_centroids and the fits are the ones from the previous frame
"""
lanes_centroids = []
centroids_confidence = []
window = np.ones(self.window_width)
if self.max_window_signal is None:
self.max_window_signal = self.compute_window_max_signal(window, self.window_width, self.window_height)
left_center, left_confidence, right_center, right_confidence, center_y = self.estimate_start_centroids(img, window)
# Add what we found for the first layer
lanes_centroids.append((left_center, right_center, center_y))
centroids_confidence.append((left_confidence, right_confidence))
# Go through each layer looking for max pixel locations
for level in range(1, (int)(img.shape[0] / self.window_height)):
left_center, left_confidence, right_center, right_confidence, center_y = self.estimate_centroids(img, window, level, left_center, right_center, lanes_centroids)
lanes_centroids.append((left_center, right_center, center_y))
centroids_confidence.append((left_confidence, right_confidence))
lanes_centroids = np.array(lanes_centroids)
centroids_confidence = np.array(centroids_confidence)
fail_code = self.detect_failure(lanes_centroids, centroids_confidence)
# If the lane detection failed and we have buffered frames, use the last one
if fail_code > 0 and len(self.centroids_buffer) > 0:
lanes_centroids = self.centroids_buffer[-1]
self.centroids_buffer.append(lanes_centroids)
if len(self.centroids_buffer) > 0:
self.last_lanes_distance = self.compute_mean_distance(lanes_centroids[:,0], lanes_centroids[:,1])
# Average frames for smoothing
lanes_centroids = np.average(self.centroids_buffer, axis = 0)
left_fit, right_fit = self.lanes_fit(lanes_centroids)
left_fit_scaled, right_fit_scaled = self.lanes_fit(lanes_centroids, ym = self.ym, xm = self.xm)
curvature = self.compute_curvature(left_fit_scaled, right_fit_scaled, np.max(lanes_centroids[:,:2]) * self.ym)
deviation = self.compute_deviation(left_fit_scaled, right_fit_scaled, img.shape[0] * self.ym, img.shape[1] * self.xm)
return lanes_centroids, (left_fit, right_fit), curvature, deviation, fail_code
def estimate_start_centroids(self, img, window):
"""
Estimates the centroids at the bottom of the image; if some frames are buffered, uses the previous frames
to define a search boundary.
Parameters
img: Input image, must be processed from the ImageProcessor
window: The base window used in the convolutions within a frame
"""
if len(self.centroids_buffer) > 0:
# If a "good" start was found already, limit the search within the previous
# frame start boundaries
prev_centroids = np.array(self.centroids_buffer)
prev_left_centroids = prev_centroids[:,:,0]
prev_right_centroids = prev_centroids[:,:,1]
left_min_index = int(max(np.min(prev_left_centroids) - self.margin, 0))
left_max_index = int(min(np.max(prev_left_centroids) + self.margin, img.shape[1]))
right_min_index = int(max(np.min(prev_right_centroids) - self.margin, 0))
right_max_index = int(min(np.max(prev_right_centroids) + self.margin, img.shape[1]))
else:
left_min_index = 0
left_max_index = int(img.shape[1] / 2)
right_min_index = int(img.shape[1] / 2)
right_max_index = img.shape[1]
window_top = int(img.shape[0] * self.first_window_height)
window_y = int(img.shape[0] - self.window_height / 2)
left_sum = np.sum(img[window_top:, left_min_index:left_max_index], axis=0)
left_signal = np.convolve(window, left_sum)
left_center, left_confidence = self.get_conv_center(left_signal, left_min_index, max_signal = None)
right_sum = np.sum(img[window_top:, right_min_index:right_max_index], axis=0)
from __future__ import print_function
import itertools
import math
import os
import random
import shutil
import tempfile
import unittest
import uuid
import numpy as np
import tensorflow as tf
import coremltools
import coremltools.models.datatypes as datatypes
from coremltools.models import _MLMODEL_FULL_PRECISION, _MLMODEL_HALF_PRECISION
from coremltools.models import neural_network as neural_network
from coremltools.models.utils import macos_version
from coremltools.models.neural_network import flexible_shape_utils
np.random.seed(10)
MIN_MACOS_VERSION_REQUIRED = (10, 13)
LAYERS_10_15_MACOS_VERSION = (10, 15)
def _get_unary_model_spec(x, mode, alpha=1.0):
input_dim = x.shape
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_unary(name='unary', input_name='data',
output_name='output', mode=mode, alpha=alpha)
return builder.spec
class CorrectnessTest(unittest.TestCase):
def runTest(self):
pass
def _compare_shapes(self, np_preds, coreml_preds):
return np.squeeze(np_preds).shape == np.squeeze(coreml_preds).shape
def _compare_nd_shapes(self, np_preds, coreml_preds, shape=()):
if shape:
return coreml_preds.shape == shape
else:
return coreml_preds.shape == np_preds.shape
def _compare_predictions(self, np_preds, coreml_preds, delta=.01):
np_preds = np_preds.flatten()
coreml_preds = coreml_preds.flatten()
for i in range(len(np_preds)):
max_den = max(1.0, np_preds[i], coreml_preds[i])
if np.abs(
np_preds[i] / max_den - coreml_preds[i] / max_den) > delta:
return False
return True
@staticmethod
def _compare_moments(model, inputs, expected, use_cpu_only=True, num_moments=10):
"""
This utility function is used to validate random distribution layers.
It compares the first 10 moments of the predicted and expected values.
"""
def get_moment(data, k):
return np.mean(np.power(data - np.mean(data), k))
if isinstance(model, str):
model = coremltools.models.MLModel(model)
model = coremltools.models.MLModel(model, useCPUOnly=use_cpu_only)
prediction = model.predict(inputs, useCPUOnly=use_cpu_only)
for output_name in expected:
np_preds = expected[output_name]
coreml_preds = prediction[output_name]
np_moments = [get_moment(np_preds.flatten(), k) for k in range(num_moments)]
coreml_moments = [get_moment(coreml_preds.flatten(), k) for k in range(num_moments)]
np.testing.assert_almost_equal(np_moments, coreml_moments, decimal=2)
# override expected values to allow element-wise compares
for output_name in expected:
expected[output_name] = prediction[output_name]
def _test_model(self,
model,
input,
expected,
model_precision=_MLMODEL_FULL_PRECISION,
useCPUOnly=False,
output_name_shape_dict={},
validate_shapes_only=False):
model_dir = None
# if we're given a path to a model
if isinstance(model, str):
model = coremltools.models.MLModel(model)
# If we're passed in a specification, save out the model
# and then load it back up
elif isinstance(model, coremltools.proto.Model_pb2.Model):
model_dir = tempfile.mkdtemp()
model_name = str(uuid.uuid4()) + '.mlmodel'
model_path = os.path.join(model_dir, model_name)
coremltools.utils.save_spec(model, model_path)
model = coremltools.models.MLModel(model, useCPUOnly=useCPUOnly)
# If we want to test the half precision case
if model_precision == _MLMODEL_HALF_PRECISION:
model = coremltools.utils.convert_neural_network_weights_to_fp16(
model)
prediction = model.predict(input, useCPUOnly=useCPUOnly)
for output_name in expected:
if self.__class__.__name__ == "SimpleTest":
assert (self._compare_shapes(expected[output_name],
prediction[output_name]))
else:
if output_name in output_name_shape_dict:
output_shape = output_name_shape_dict[output_name]
else:
output_shape = []
if len(output_shape) == 0 and len(expected[output_name].shape) == 0:
output_shape = (1,)
assert (self._compare_nd_shapes(expected[output_name],
prediction[output_name],
output_shape))
if not validate_shapes_only:
assert (self._compare_predictions(expected[output_name],
prediction[output_name]))
# Remove the temporary directory if we created one
if model_dir and os.path.exists(model_dir):
shutil.rmtree(model_dir)
@unittest.skipIf(macos_version() < MIN_MACOS_VERSION_REQUIRED,
'macOS 10.13+ is required. Skipping tests.')
class SimpleTest(CorrectnessTest):
def test_tiny_upsample_linear_mode(self):
input_dim = (1, 1, 3) # (C,H,W)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_upsample(name='upsample',
scaling_factor_h=2, scaling_factor_w=3,
input_name='data', output_name='output',
mode='BILINEAR')
input = {
'data': np.reshape(np.array([1.0, 2.0, 3.0]), (1, 1, 3))
}
expected = {
'output': np.array(
[[1, 1.333, 1.666, 2, 2.333, 2.666, 3, 3, 3],
[1, 1.333, 1.6666, 2, 2.33333, 2.6666, 3, 3, 3]
])
}
self._test_model(builder.spec, input, expected)
def test_LRN(self):
input_dim = (1, 3, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_lrn(name='lrn', input_name='data', output_name='output',
alpha=2, beta=3, local_size=1, k=8)
input = {
'data': np.ones((1, 3, 3))
}
expected = {
'output': 1e-3 * np.ones((1, 3, 3))
}
self._test_model(builder.spec, input, expected)
def test_MVN(self):
input_dim = (2, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_mvn(name='mvn', input_name='data', output_name='output',
across_channels=False, normalize_variance=False)
input = {
'data': np.reshape(np.arange(8, dtype=np.float32), (2, 2, 2))
}
expected = {
'output': np.reshape(np.arange(8) - np.array(
[1.5, 1.5, 1.5, 1.5, 5.5, 5.5, 5.5, 5.5]), (2, 2, 2))
}
self._test_model(builder.spec, input, expected)
def test_L2_normalize(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_l2_normalize(name='mvn', input_name='data',
output_name='output')
input = {
'data': np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
}
expected = {
'output': np.reshape(np.arange(4, dtype=np.float32),
(1, 2, 2)) / np.sqrt(14)
}
self._test_model(builder.spec, input, expected)
def test_unary_sqrt(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.sqrt(x)}
spec = _get_unary_model_spec(x, 'sqrt')
self._test_model(spec, input, expected)
def test_unary_rsqrt(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 1 / np.sqrt(x)}
spec = _get_unary_model_spec(x, 'rsqrt')
self._test_model(spec, input, expected)
def test_unary_inverse(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 1 / x}
spec = _get_unary_model_spec(x, 'inverse')
self._test_model(spec, input, expected)
def test_unary_power(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x ** 3}
spec = _get_unary_model_spec(x, 'power', 3)
self._test_model(spec, input, expected)
def test_unary_exp(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.exp(x)}
spec = _get_unary_model_spec(x, 'exp')
self._test_model(spec, input, expected)
def test_unary_log(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.log(x)}
spec = _get_unary_model_spec(x, 'log')
self._test_model(spec, input, expected)
def test_unary_abs(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.abs(x)}
spec = _get_unary_model_spec(x, 'abs')
self._test_model(spec, input, expected)
def test_unary_threshold(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.maximum(x, 2)}
spec = _get_unary_model_spec(x, 'threshold', 2)
self._test_model(spec, input, expected)
def test_split(self):
input_dim = (9, 2, 2)
x = np.random.rand(*input_dim)
input_features = [('data', datatypes.Array(*input_dim))]
output_names = []
output_features = []
for i in range(3):
out = 'out_' + str(i)
output_names.append(out)
output_features.append((out, None))
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_split(name='split', input_name='data',
output_names=output_names)
input = {'data': x}
expected = {
'out_0': x[0: 3, :, :],
'out_1': x[3: 6, :, :],
'out_2': x[6: 9, :, :]
}
self._test_model(builder.spec, input, expected)
def test_scale_constant(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_scale(name='scale', W=5, b=45, has_bias=True,
input_name='data', output_name='output')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 5 * x + 45}
self._test_model(builder.spec, input, expected)
def test_scale_matrix(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_scale(name='scale', W=W, b=None, has_bias=False,
input_name='data', output_name='output',
shape_scale=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': W * x}
self._test_model(builder.spec, input, expected)
def test_bias_constant(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_bias(name='bias', b=45, input_name='data',
output_name='output')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + 45}
self._test_model(builder.spec, input, expected)
def test_bias_matrix(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_bias(name='bias', b=b, input_name='data',
output_name='output',
shape_bias=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected)
def test_load_constant(self, model_precision=_MLMODEL_FULL_PRECISION):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_load_constant(name='load_constant', output_name='bias',
constant_value=b, shape=[1, 2, 2])
builder.add_elementwise(name='add', input_names=['data', 'bias'],
output_name='output', mode='ADD')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected, model_precision)
def test_load_constant_half_precision(self):
self.test_load_constant(model_precision=_MLMODEL_HALF_PRECISION)
def test_min(self):
input_dim = (1, 2, 2)
input_features = [('data_0', datatypes.Array(*input_dim)),
('data_1', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_elementwise(name='min', input_names=['data_0', 'data_1'],
output_name='output', mode='MIN')
x1 = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
x2 = np.reshape(np.arange(2, 6, dtype=np.float32), (1, 2, 2))
input = {'data_0': x1, 'data_1': x2}
expected = {'output': np.minimum(x1, x2)}
self._test_model(builder.spec, input, expected)
def test_conv_same_padding(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.random.rand(3, 3, 10, 20)
builder.add_convolution(name='conv', kernel_channels=10,
output_channels=20,
height=3, width=3, stride_height=2,
stride_width=2,
border_mode='same', groups=1,
W=W, b=None, has_bias=False,
input_name='data', output_name='output',
same_padding_asymmetry_mode='TOP_LEFT_HEAVY')
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.random.rand(20, 8, 8)}
self._test_model(
builder.spec, input, expected, validate_shapes_only=True)
def test_deconv_valid_padding(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.random.rand(3, 3, 10, 20)
builder.add_convolution(name='deconv', kernel_channels=10,
output_channels=20,
height=3, width=3, stride_height=2,
stride_width=2,
border_mode='valid', groups=1,
W=W, b=None, has_bias=False,
is_deconv=True,
input_name='data', output_name='output',
padding_top=2, padding_bottom=3,
padding_left=2, padding_right=3)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.random.rand(20, 26, 26)}
self._test_model(
builder.spec, input, expected, validate_shapes_only=True)
def test_deconv_non_unit_groups(self):
input_dim = (16, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features)
W = np.random.rand(3, 3, 16, 5)
builder.add_convolution(name='deconv', kernel_channels=16,
output_channels=20,
height=3, width=3, stride_height=2,
stride_width=2,
border_mode='valid', groups=4,
W=W, b=None, has_bias=False,
is_deconv=True,
input_name='data', output_name='output',
padding_top=2, padding_bottom=3,
padding_left=2, padding_right=3)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.random.rand(20, 26, 26)}
self._test_model(
builder.spec, input, expected, validate_shapes_only=True)
def test_linear_activation(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_activation(name='activation',
non_linearity='LINEAR',
input_name='data',
output_name='output', params=[34.0, 67.0])
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': 34.0 * x + 67.0}
self._test_model(builder.spec, input, expected)
def test_padding_constant(self):
input_dim = (1, 2, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features)
builder.add_padding(name='pad',
left=1, right=0, top=2, bottom=0,
value=-1,
input_name='data',
output_name='output')
x = np.reshape(np.array([[1, 2, 3], [4, 5, 6]]), (1, 2, 3)).astype(
np.float32)
input = {'data': x}
y = np.reshape(
np.array([[-1, -1, -1, -1], [-1, -1, -1, -1], [-1, 1, 2, 3],
[-1, 4, 5, 6]]), (1, 4, 4)).astype(np.float32)
expected = {'output': y}
self._test_model(builder.spec, input, expected)
def test_padding_replication(self):
input_dim = (1, 2, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_padding(name='pad',
left=1, top=2,
input_name='data',
output_name='output', padding_type='replication')
x = np.reshape(np.array([[1, 2, 3], [4, 5, 6]]), (1, 2, 3)).astype(
np.float32)
input = {'data': x}
y = np.reshape(np.array([[1, 1, 2, 3], [1, 1, 2, 3], [1, 1, 2, 3],
[4, 4, 5, 6]]), (1, 4, 4)).astype(np.float32)
expected = {'output': y}
self._test_model(builder.spec, input, expected)
def test_reshape_target_shape_3(self):
input_dim = (1, 2, 5) # (C,H,W)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_reshape(name='reshape', input_name='data',
output_name='output', target_shape=(10, 1, 1),
mode=0)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.reshape(x, (10, 1, 1))}
self._test_model(builder.spec, input, expected)
def test_reshape_target_shape_4(self):
input_dim = (1, 2, 5) # (C,H,W)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_reshape(name='reshape', input_name='data',
output_name='output', target_shape=(1, 10, 1, 1),
mode=0)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.reshape(x, (1, 10, 1, 1))}
self._test_model(builder.spec, input, expected)
def test_bias_matrix_cpu(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_bias(name='bias', b=b, input_name='data',
output_name='output',
shape_bias=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_linear_activation_cpu(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_activation(name='activation',
non_linearity='LINEAR',
input_name='data',
output_name='output', params=[34.0, 67.0])
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': 34.0 * x + 67.0}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
@unittest.skipIf(macos_version() < LAYERS_10_15_MACOS_VERSION,
'macOS 10.15+ required. Skipping tests.')
class NewLayersSimpleTest(CorrectnessTest):
def test_shape_flexibility_range(self):
input_features = [('data', datatypes.Array(*(3,4)))]
builder = neural_network.NeuralNetworkBuilder(input_features,
[('output', None)], disable_rank5_shape_mapping=True)
builder.add_sin(name='sin', input_name='data', output_name='output')
spec = builder.spec
flexible_shape_utils.set_multiarray_ndshape_range(spec, feature_name='data',
lower_bounds=[1,1], upper_bounds=[-1,5])
shapes = [(3,4), (1,5), (60,5), (22,4), (5,3)]
for s in shapes:
x = np.random.rand(*s)
expected = {'output': np.sin(x)}
self._test_model(spec, {'data': x}, expected, useCPUOnly=True)
@unittest.skip('TO FIX')
def test_shape_flexibility_enumeration(self):
input_features = [('data', datatypes.Array(*(3,4,6)))]
builder = neural_network.NeuralNetworkBuilder(input_features,
[('output', None)], disable_rank5_shape_mapping=True)
builder.add_sin(name='sin', input_name='data', output_name='output')
spec = builder.spec
shapes = [(1, 5, 7), (60, 5, 2), (22, 4, 9), (5, 3, 56)]
flexible_shape_utils.add_multiarray_ndshape_enumeration(spec, feature_name='data', enumerated_shapes=shapes)
shapes.append((3,4,6))
for s in shapes:
x = np.random.rand(*s)
expected = {'output': np.sin(x)}
self._test_model(spec, {'data': x}, expected, useCPUOnly=True)
def test_transpose_cpu(self):
for rank in range(1, 6):
axes = np.random.permutation(rank)
axes = [axis - rank if np.random.choice([True, False]) else axis for axis in axes]
input_shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_transpose(name='TransposeND',
axes=axes,
input_name='data',
output_name='output')
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.transpose(x, axes)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_batched_mat_mul_cpu(self):
a_shapes = [(10,), (4, 10), (10,), (10,), (2, 3), (1, 3, 4),
(1, 3, 1, 2, 3), (2, 3, 1, 3, 4)]
b_shapes = [(10,), (10,), (10, 3), (2, 10, 3), (3, 4), (3, 2, 4, 5),
(1, 4, 3, 2), (2, 1, 2, 4, 5)]
out_shapes = [(1, 1), (4, 1), (1, 3), (2, 1, 3), (2, 4), (3, 2, 3, 5),
(1, 3, 4, 2, 2), (2, 3, 2, 3, 5)]
for a_shape, b_shape, outShape in zip(a_shapes, b_shapes, out_shapes):
input_shapes = [a_shape, b_shape]
input_features = [
('A', datatypes.Array(*input_shapes[0])),
('B', datatypes.Array(*input_shapes[1]))
]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_batched_mat_mul(name='batched_mat_mul',
input_names=['A', 'B'],
output_name='output',
transpose_a=False,
transpose_b=False)
a = np.random.rand(*input_shapes[0])
b = np.random.rand(*input_shapes[1])
input = {'A': a, 'B': b}
expected = {'output': np.array(np.matmul(a, b))}
shape_dict = {'output': outShape}
self._test_model(builder.spec, input, expected, useCPUOnly=True,
output_name_shape_dict=shape_dict)
def test_batched_mat_mul_with_transposes_cpu(self):
for transpose_a, transpose_b in itertools.product([True, False],
[True, False]):
a_shape = (3, 4)
b_shape = (4, 5)
a_shape = a_shape[::-1] if transpose_a else a_shape
b_shape = b_shape[::-1] if transpose_b else b_shape
input_shapes = [a_shape, b_shape]
input_features = [
('A', datatypes.Array(*input_shapes[0])),
('B', datatypes.Array(*input_shapes[1]))
]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_batched_mat_mul(
name='BatchedMatMul', input_names=['A', 'B'],
output_name='output', transpose_a=transpose_a,
transpose_b=transpose_b
)
a = np.random.rand(*input_shapes[0])
b = np.random.rand(*input_shapes[1])
inputs = {'A': a, 'B': b}
a = a.T if transpose_a else a
b = b.T if transpose_b else b
expected = {'output': np.matmul(a, b)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_batched_mat_mul_single_input_cpu(
self, model_precision=_MLMODEL_FULL_PRECISION):
X1 = 11
X2 = 23
W = np.random.rand(X1, X2)
bias = np.random.rand(X2)
input_shapes = [(X1,), (5, X1), (2, 3, X1), (4, 1, X1), (12, 5, 8, X1),
(2, 3, 1, 5, X1)]
for input_shape in input_shapes:
x = np.random.rand(*input_shape)
np_out = np.matmul(x, W) + bias
expected = {'output': np_out}
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_batched_mat_mul(name='batched_mat_mul',
input_names=['data'],
output_name='output',
weight_matrix_rows=X1,
weight_matrix_columns=X2,
W=W, bias=bias)
inputs = {'data': x}
self._test_model(
builder.spec, inputs, expected,
model_precision=model_precision, useCPUOnly=True)
def test_batched_mat_mul_single_input_half_precision_cpu(self):
self.test_batched_mat_mul_single_input_cpu(
model_precision=_MLMODEL_HALF_PRECISION)
def test_embedding_nd_cpu(
self, model_precision=_MLMODEL_FULL_PRECISION, use_cpu_only=True):
vocab_size = 10
embedding_size = 19
W = np.random.rand(embedding_size, vocab_size)
input_shapes = [(5, 1), (2, 3, 1), (4, 1, 1), (12, 5, 8, 1),
(2, 3, 1, 5, 1)]
for input_shape in input_shapes:
x = np.random.randint(vocab_size, size=input_shape)
np_out = np.take(np.transpose(W), np.squeeze(x, axis=-1), axis=0)
expected = {'output': np_out}
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_embedding_nd(name='embedding_nd',
input_name='data',
output_name='output',
vocab_size=vocab_size,
embedding_size=embedding_size,
W=W)
input = {'data': x.astype(np.float32)}
self._test_model(
builder.spec, input, expected,
model_precision=model_precision, useCPUOnly=use_cpu_only)
def test_embedding_nd_half_precision_cpu(self):
self.test_embedding_nd_cpu(
model_precision=_MLMODEL_HALF_PRECISION, use_cpu_only=True)
def test_embedding_nd_GPU(self):
self.test_embedding_nd_cpu(
model_precision=_MLMODEL_FULL_PRECISION, use_cpu_only=False)
def test_embedding_nd_half_precision_GPU(self):
self.test_embedding_nd_cpu(
model_precision=_MLMODEL_HALF_PRECISION, use_cpu_only=False)
def test_softmax_nd_cpu(self):
for rank in range(1, 6):
for axis in range(-rank, rank):
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_softmax_nd(name='softmax_nd', input_name='data',
output_name='output', axis=axis)
x = np.random.rand(*input_shape)
input = {'data': x}
y = np.exp(x - np.max(x, axis=axis, keepdims=True))
y = y / np.sum(y, axis=axis, keepdims=True)
expected = {'output': y}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_concat_nd_cpu(self):
for rank in range(1, 6):
for axis in range(-rank, rank):
n_inputs = np.random.choice(range(2, 5))
output_shape = np.random.randint(low=2, high=5, size=rank)
output_shape[axis] = 0
input_shapes = []
input_features = []
input_names = []
for _ in range(n_inputs):
input_shapes.append(np.copy(output_shape))
input_shapes[-1][axis] = np.random.choice(range(2, 8))
output_shape[axis] += input_shapes[-1][axis]
for i, input_dim in enumerate(input_shapes):
input_name = 'input_%s' % str(i)
input_names.append(input_name)
input_features.append((input_name, datatypes.Array(*input_dim)))
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_concat_nd(name='concat_nd', input_names=input_names,
output_name='output', axis=axis)
input_tensors = []
for input_dim in input_shapes:
input_tensors.append(np.random.rand(*input_dim))
input = dict(zip(input_names, input_tensors))
expected = {'output': np.concatenate(input_tensors, axis)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_fill_like_cpu(self):
for rank in range(1, 6):
target_shape = np.random.randint(low=2, high=6, size=rank)
value = float(np.random.rand())
input_features = [('tensor', datatypes.Array(*target_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_fill_like(name='fill_like', input_name='tensor',
output_name='output', value=value)
tensor = np.random.rand(*target_shape)
input = {'tensor': tensor}
expected = {'output': np.zeros(target_shape) + value}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_fill_static_cpu(self):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
value = float(np.random.rand())
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_fill_static(name='fill_static', output_name='tmp',
output_shape=list(shape), value=value)
builder.add_elementwise('add_layer', ['data', 'tmp'], 'output', mode='ADD')
data = np.random.rand(*shape)
input = {'data': data}
expected = {'output': data + value}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_fill_dynamic_cpu(self):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
value = float(np.random.rand())
input_features = [('shape', datatypes.Array(len(input_shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_fill_dynamic(name='fill_dynamic', input_name='shape',
output_name='output', value=value)
input = {'shape': np.array(input_shape, dtype='float')}
expected = {'output': np.zeros(input_shape) + value}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_broadcast_to_like_cpu(self):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
mask = [np.random.choice([True, False, False]) for _ in range(rank)]
input_shape = np.where(mask, 1, input_shape)
target_rank = np.random.randint(low=rank, high=6)
target_shape = [np.random.randint(low=2, high=8) if (-i > rank or input_shape[i] == 1)
else input_shape[i] for i in range(-1, -target_rank - 1, -1)][::-1]
input_features = [('data', datatypes.Array(*input_shape)),
('tensor', datatypes.Array(*target_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_broadcast_to_like(name='broadcast_to_like',
input_names=['data', 'tensor'],
output_name='output')
data = np.random.rand(*input_shape)
tensor = np.random.rand(*target_shape)
inputs = {'data': data, 'tensor': tensor}
expected = {'output': np.broadcast_to(data, target_shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_broadcast_to_static_cpu(self):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
mask = [np.random.choice([True, False, False]) for _ in range(rank)]
input_shape = np.where(mask, 1, input_shape)
target_rank = np.random.randint(low=rank, high=6)
target_shape = [np.random.randint(low=2, high=8) if (-i > rank or input_shape[i] == 1)
else input_shape[i] for i in range(-1, -target_rank - 1, -1)][::-1]
input_features = [('data', datatypes.Array(*input_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_broadcast_to_static(name='broadcast_to_static',
input_name='data',
output_name='output',
output_shape=list(target_shape))
data = np.random.rand(*input_shape)
input = {'data': data}
expected = {'output': np.broadcast_to(data, target_shape)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_broadcast_to_dynamic_cpu(self):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
mask = [np.random.choice([True, False, False]) for _ in range(rank)]
input_shape = np.where(mask, 1, input_shape)
target_rank = np.random.randint(low=rank, high=6)
target_shape = [np.random.randint(low=2, high=8) if (-i > rank or input_shape[i] == 1)
else input_shape[i] for i in range(-1, -target_rank - 1, -1)][::-1]
input_features = [('data', datatypes.Array(*input_shape)),
('shape', datatypes.Array(len(target_shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_broadcast_to_dynamic(name='broadcast_to_dynamic',
input_names=['data', 'shape'],
output_name='output')
data = np.random.rand(*input_shape)
inputs = {'data': data, 'shape': np.array(target_shape, dtype='float')}
expected = {'output': np.broadcast_to(data, target_shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_trigonometry_cpu(self):
ops = ['sin', 'cos', 'tan',
'asin', 'acos', 'atan',
'sinh', 'cosh', 'tanh',
'asinh', 'acosh', 'atanh']
for op in ops:
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
x = np.random.rand(*shape)
if op == 'sin':
builder.add_sin(name=op, input_name='data', output_name='output')
expected = {'output': np.sin(x)}
elif op == 'cos':
builder.add_cos(name=op, input_name='data', output_name='output')
expected = {'output': np.cos(x)}
elif op == 'tan':
builder.add_tan(name=op, input_name='data', output_name='output')
expected = {'output': np.tan(x)}
elif op == 'asin':
builder.add_asin(name=op, input_name='data', output_name='output')
expected = {'output': np.arcsin(x)}
elif op == 'acos':
builder.add_acos(name=op, input_name='data', output_name='output')
expected = {'output': np.arccos(x)}
elif op == 'atan':
builder.add_atan(name=op, input_name='data', output_name='output')
expected = {'output': np.arctan(x)}
elif op == 'sinh':
builder.add_sinh(name=op, input_name='data', output_name='output')
expected = {'output': np.sinh(x)}
elif op == 'cosh':
builder.add_cosh(name=op, input_name='data', output_name='output')
expected = {'output': np.cosh(x)}
elif op == 'tanh':
builder.add_tanh(name=op, input_name='data', output_name='output')
expected = {'output': np.tanh(x)}
elif op == 'asinh':
builder.add_asinh(name=op, input_name='data', output_name='output')
expected = {'output': np.arcsinh(x)}
elif op == 'acosh':
x = np.random.choice([10, np.e, 1], tuple(shape)).astype(np.float32)
builder.add_acosh(name=op, input_name='data', output_name='output')
expected = {'output': np.arccosh(x)}
elif op == 'atanh':
builder.add_atanh(name=op, input_name='data', output_name='output')
expected = {'output': np.arctanh(x)}
self._test_model(builder.spec, {'data': x}, expected, useCPUOnly=True)
def test_exp2_cpu(self):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_exp2(name='exp2', input_name='data', output_name='output')
x = np.random.rand(*shape)
input = {'data': x}
expected = {'output': np.exp2(x)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_elementwise_binary_cpu(self):
input_names = ['A', 'B']
test_cases = ['greater', 'less', 'equal', 'not_equal', 'greater_equal',
'less_equal', 'logical_and', 'logical_or', 'logical_xor',
'add', 'subtract', 'multiply', 'divide', 'power',
'maximum', 'minimum', 'floor_divide', 'mod']
for test_case in test_cases:
for _ in range(10):
rank_a = np.random.randint(low=1, high=6)
rank_b = np.random.randint(low=1, high=6)
rank_out = max(rank_a, rank_b)
shape_a = np.random.randint(low=2, high=8, size=rank_a)
shape_b = np.random.randint(low=2, high=8, size=rank_b)
for i in range(-1, -rank_out - 1, -1):
dims = []
if -i <= rank_a: dims.append(shape_a[i])
if -i <= rank_b: dims.append(shape_b[i])
dim = np.random.choice(dims)
if -i <= rank_a: shape_a[i] = np.random.choice([1, dim])
if -i <= rank_b: shape_b[i] = np.random.choice([1, dim])
input_shapes = [shape_a, shape_b]
input_features = [('A', datatypes.Array(*input_shapes[0])),
('B', datatypes.Array(*input_shapes[1]))]
builder = neural_network.NeuralNetworkBuilder(input_features, [
('output', None)], disable_rank5_shape_mapping=True)
func = getattr(np, test_case)
if test_case == 'greater':
builder.add_greater_than(test_case, input_names=input_names,
output_name='output')
elif test_case == 'less':
builder.add_less_than(test_case, input_names=input_names,
output_name='output')
elif test_case == 'equal':
builder.add_equal(test_case, input_names=input_names,
output_name='output')
elif test_case == 'not_equal':
builder.add_not_equal(test_case, input_names=input_names,
output_name='output')
elif test_case == 'greater_equal':
builder.add_greater_than(test_case, input_names=input_names,
output_name='output',
use_greater_than_equal=True)
elif test_case == 'less_equal':
builder.add_less_than(test_case, input_names=input_names,
output_name='output',
use_less_than_equal=True)
elif test_case == 'logical_and':
builder.add_logical(test_case, input_names=input_names,
output_name='output', mode='AND')
elif test_case == 'logical_or':
builder.add_logical(test_case, input_names=input_names,
output_name='output', mode='OR')
elif test_case == 'logical_xor':
builder.add_logical(test_case, input_names=input_names,
output_name='output', mode='XOR')
elif test_case == 'add':
builder.add_add_broadcastable(test_case, input_names=input_names,
output_name='output')
elif test_case == 'subtract':
builder.add_subtract_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'multiply':
builder.add_multiply_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'divide':
builder.add_divide_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'power':
builder.add_pow_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'maximum':
builder.add_max_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'minimum':
builder.add_min_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'floor_divide':
builder.add_floor_div_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'mod':
builder.add_mod_broadcastable(test_case,
input_names=input_names,
output_name='output')
a = np.random.rand(*input_shapes[0])
b = np.random.rand(*input_shapes[1])
input = {'A': a, 'B': b}
expected = {'output': func(a, b, dtype=np.float32)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_elementwise_boolean_unary_cpu(self):
input_names = ['input']
shapes = [(1, 2, 3, 1), (3, 1, 2, 1, 2), (1, 2, 1, 3), (2, 3),
(2, 1, 1), (2, 3, 4), (2, 4), (1,), (1,)]
test_cases = ['greater', 'less', 'equal', 'not_equal', 'greater_equal',
'less_equal']
for test_case in test_cases:
for shape in shapes:
input_features = [('input', datatypes.Array(*shape))]
b = np.random.rand()
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
func = getattr(np, test_case)
if test_case == 'greater':
builder.add_greater_than(test_case, input_names=input_names,
output_name='output', alpha=b)
elif test_case == 'less':
builder.add_less_than(test_case, input_names=input_names,
output_name='output', alpha=b)
elif test_case == 'equal':
builder.add_equal(test_case, input_names=input_names,
output_name='output', alpha=b)
elif test_case == 'not_equal':
builder.add_not_equal(test_case, input_names=input_names,
output_name='output', alpha=b)
elif test_case == 'greater_equal':
builder.add_greater_than(test_case, input_names=input_names,
output_name='output',
use_greater_than_equal=True,
alpha=b)
elif test_case == 'less_equal':
builder.add_less_than(test_case, input_names=input_names,
output_name='output',
use_less_than_equal=True, alpha=b)
a = np.random.rand(*shape)
input = {'input': a}
expected = {'output': func(a, b, dtype=np.float32)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_logical_not_cpu(self):
input_names = ['input']
shapes = [(1, 2, 3, 1), (3, 1, 2, 1, 2), (1, 2, 1, 3), (2, 3),
(2, 1, 1), (2, 3, 4), (2, 4), (1,), (1,)]
for shape in shapes:
input_features = [('input', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_logical('logical_not', input_names=input_names,
output_name='output', mode='NOT')
a = np.random.rand(*shape)
input = {'input': a}
expected = {'output': np.logical_not(a)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_stack_cpu(self):
for input_rank in range(1, 5):
for axis in range(-input_rank - 1, input_rank + 1):
n_inputs = np.random.choice(range(2, 5))
input_shape = np.random.randint(low=2, high=5, size=input_rank)
input_features = []
input_names = []
for i in range(n_inputs):
input_name = 'input_%s' % str(i)
input_names.append(input_name)
input_features.append(
(input_name, datatypes.Array(*input_shape)))
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_stack(name='stack', input_names=input_names,
output_name='output', axis=axis)
input_tensors = []
for _ in range(n_inputs):
input_tensors.append(np.random.rand(*input_shape))
input = dict(zip(input_names, input_tensors))
expected = {'output': np.stack(input_tensors, axis)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_ceil_cpu(self):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_ceil(name='ceil', input_name='data', output_name='output')
x = np.random.rand(*shape)
inputs = {'data': x}
expected = {'output': np.ceil(x)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_floor_cpu(self):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_floor(name='floor', input_name='data', output_name='output')
x = np.random.rand(*shape)
inputs = {'data': x}
expected = {'output': np.floor(x)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_round_cpu(self):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_round(name='round', input_name='data', output_name='output')
x = np.float32(np.random.rand(*shape) * np.random.randint(low=-100, high=101))
inputs = {'data': x}
expected = {'output': np.around(x)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_sign_cpu(self):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_sign(name='sign', input_name='data', output_name='output')
x = np.random.choice([-np.random.rand(1), 0.0, np.random.rand(1)],
tuple(shape)).astype(np.float32)
inputs = {'data': x}
expected = {'output': np.sign(x)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_clip_cpu(self):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
x = np.random.rand(*shape)
min_value = np.percentile(x, 25)
max_value = np.percentile(x, 75)
input = {'data': x}
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_clip(name='clip', input_name='data', output_name='output',
min_value=min_value, max_value=max_value)
expected = {'output': np.clip(x, min_value, max_value)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_split_nd_cpu(self):
for rank in range(1, 6):
for axis in range(-rank, rank):
n_outputs = np.random.choice(range(2, 4))
input_shape = np.random.randint(low=2, high=5, size=rank)
input_shape[axis] = 0
output_shapes = []
output_features = []
output_names = []
almost_equal = random.choice([True, False])
remainder = np.random.choice(
range(1, n_outputs)) if almost_equal else 0
value = np.random.choice(range(2, 5))
for k in range(n_outputs):
output_shapes.append(np.copy(input_shape))
output_shapes[-1][
axis] = value + 1 if k < remainder else value
input_shape[axis] += output_shapes[-1][axis]
for i in range(n_outputs):
output_name = 'output_%s' % str(i)
output_names.append(output_name)
output_features.append(
(output_name, None))
input_features = [('data', datatypes.Array(*input_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_split_nd(name='split_nd', input_name='data',
output_names=output_names, axis=axis,
num_splits=n_outputs)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = dict(
zip(
output_names, np.array_split(x, n_outputs, axis=axis)
if almost_equal else np.split(x, n_outputs, axis=axis)
)
) # Explicitly trying to compare against both versions of numpy split
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_split_nd_with_split_sizes_cpu(self):
for rank in range(1, 6):
for axis in range(-rank, rank):
n_outputs = np.random.choice(range(2, 4))
input_shape = np.random.randint(low=2, high=5, size=rank)
input_shape[axis] = 0
output_shapes, output_features, output_names = [], [], []
sections, split_sizes = [], []
for _ in range(n_outputs):
output_shapes.append(np.copy(input_shape))
output_shapes[-1][axis] = np.random.choice(range(2, 5))
input_shape[axis] += output_shapes[-1][axis]
sections.append(input_shape[axis])
split_sizes.append(output_shapes[-1][axis])
sections.pop()
for i in range(n_outputs):
output_name = 'output_%s' % str(i)
output_names.append(output_name)
output_features.append(
(output_name, None))
input_features = [('data', datatypes.Array(*input_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_split_nd(name='split_nd', input_name='data',
output_names=output_names, axis=axis,
split_sizes=split_sizes)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = dict(
zip(output_names, np.split(x, sections, axis=axis)))
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_slice_static_cpu(self):
for rank in range(1, 6):
for _ in range(200):
input_shape = np.array([5 for _ in range(rank)])
objs, strides, begin_masks, end_ids, end_masks, begin_ids = [], [], [], [], [], []
for dim in range(rank):
stride = random.choice([-3, -1, 1, 2])
begin_mask = random.choice([True, False])
end_mask = random.choice([True, False])
length = 0
while length <= 0:
begin_id = np.random.randint(low=-input_shape[dim],
high=input_shape[dim])
end_id = np.random.randint(low=-input_shape[dim],
high=input_shape[dim])
obj = slice(None if begin_mask else begin_id,
None if end_mask else end_id, stride)
length = np.arange(input_shape[dim])[(obj,)].shape[0]
                    objs.append(obj)
                    strides.append(stride)
                    begin_masks.append(begin_mask)
                    end_masks.append(end_mask)
                    begin_ids.append(begin_id)
                    end_ids.append(end_id)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_slice_static('slice_static', 'data', 'output',
begin_ids=begin_ids, end_ids=end_ids, strides=strides,
begin_masks=begin_masks, end_masks=end_masks)
x = np.random.rand(*input_shape)
inputs = {'data': x}
expected = {'output': x[tuple(objs)]}
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_slice_dynamic_cpu(self):
for rank in range(1, 6):
input_shape = np.array([5 for _ in range(rank)])
objs, strides, begin_masks, end_ids, end_masks, begin_ids = [], [], [], [], [], []
for dim in range(rank):
stride = random.choice([-3, -1, 1, 2])
begin_mask = random.choice([True, False])
end_mask = random.choice([True, False])
length = 0
while length <= 0:
begin_id = np.random.randint(low=-input_shape[dim],
high=input_shape[dim])
end_id = np.random.randint(low=-input_shape[dim],
high=input_shape[dim])
obj = slice(None if begin_mask else begin_id,
None if end_mask else end_id, stride)
length = np.arange(input_shape[dim])[(obj,)].shape[0]
                objs.append(obj)
                strides.append(stride)
                begin_masks.append(begin_mask)
                end_masks.append(end_mask)
                begin_ids.append(begin_id)
                end_ids.append(end_id)
# test different number of inputs, from 2 inputs up to 6 inputs
# when num_inputs == 2, begin_ids are inputs, rest are read from parameters
# when num_inputs == 6, all read from inputs, none are read from parameters
for num_inputs in [2, 3, 4, 5, 6]:
x = np.random.rand(*input_shape)
input_features = [('data', datatypes.Array(*input_shape))]
input_names = ['data']
inputs = dict()
inputs['data'] = x
if num_inputs == 2:
input_features = [('data', datatypes.Array(*input_shape)),
('begin_ids', datatypes.Array(len(begin_ids)))]
input_names = ['data', 'begin_ids']
inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)
elif num_inputs == 3:
input_features = [('data', datatypes.Array(*input_shape)),
('begin_ids', datatypes.Array(len(begin_ids))),
('end_ids', datatypes.Array(len(end_ids)))]
input_names = ['data', 'begin_ids', 'end_ids']
inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)
inputs['end_ids'] = np.array(end_ids, dtype=np.int32)
elif num_inputs == 4:
input_features = [('data', datatypes.Array(*input_shape)),
('begin_ids', datatypes.Array(len(begin_ids))),
('end_ids', datatypes.Array(len(end_ids))),
('strides', datatypes.Array(len(strides)))]
input_names = ['data', 'begin_ids', 'end_ids', 'strides']
                    inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)
"""
Experiment summary
------------------
Treat each country's case counts over time (summed over its
provinces/states) as a vector, then run a simple K-Nearest
Neighbor comparison between countries. Which country has the
most similar trajectory to a given country?
"""
import sys
sys.path.insert(0, '..')
from utils import data
import os
import sklearn
import numpy as np
import json
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
# ------------ HYPERPARAMETERS -------------
BASE_PATH = '../COVID-19/csse_covid_19_data/'
MIN_CASES = 500000
# ------------------------------------------
deaths = os.path.join(
BASE_PATH,
'csse_covid_19_time_series',
'time_series_covid19_deaths_global.csv')
deaths = data.load_csv_data(deaths)
confirmed = os.path.join(
BASE_PATH,
'csse_covid_19_time_series',
'time_series_covid19_confirmed_global.csv')
confirmed = data.load_csv_data(confirmed)
fig = plt.figure(figsize=(12, 12))
ax = fig.add_subplot(111)
cm = plt.get_cmap('jet')
NUM_COLORS = 1
LINE_STYLES = ['solid', 'dashed', 'dotted']
NUM_STYLES = len(LINE_STYLES)
for val in np.unique(confirmed["Country/Region"]):
df = data.filter_by_attribute(
confirmed, "Country/Region", val)
cases, labels = data.get_cases_chronologically(df)
cases = cases.sum(axis=0)
df_deaths = data.filter_by_attribute(
deaths, "Country/Region", val)
death_cases, death_labels = data.get_cases_chronologically(df_deaths)
death_cases = death_cases.sum(axis=0)
#if cases[-1] > MIN_CASES:
if labels[0,1] == "US":
NUM_COLORS += 1
colors = [cm(i) for i in np.linspace(0, 1, NUM_COLORS)]
legend = []
handles = []
# Loop over every country present in the confirmed-cases table
for val in np.unique(confirmed["Country/Region"]):
"""
Produce figures for a single trained network.
"""
# imports
import numpy as np
import scipy.stats
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.ticker import MultipleLocator, AutoMinorLocator
import fly_rec as rec
import utilities as util
import copy
from cycler import cycler
from astropy.stats.circstats import circmean
import os
from pathlib import Path
# Simulation options
sim_run = "2Enough"
t_run = util.sim_time(sim_run)
vel_gain = True # Whether to compute and plot the neural velocity gain plot
vel_hist = False # Whether to plot velocity histograms
PI_err = True # Whether to produce histogram of errors after PI in darkness
cut_exc = False # Whether to exterminate extended positive sidelobes or not
perturb_conn = False # Add noise to final connectivity to account for biological irregularities
save = False # Whether to save results
n_hist = 1001 # Number of bins for angular velocity histograms
prop_std = .2 # Noise level as a fraction of variability in connectivity
width = 13 # Width of extermination
offset = 4 # Offset for start of extermination
n_PI = 1000 # Number of examples for PI in darkness error
PI_dur = np.arange(10,70,10) # Duration of PI in darkness segments, in sec
avg = 20 # How many snapshots to average position of bump on
data_dir = '\\savefiles\\trained_networks\\'
PI_example_dir = '\\savefiles\\PI_example.npz'
# Parameters
params = {
'dt': 5*10**(-4), # euler integration step size
'n_neu': 60, # number of HD neurons
'v0': 2, # vestibular input offset
'v_max': 720, # maximum angular velocity
'M': 4, # visual receptive field magnitude
'sigma': .15, # visual receptive field width
'inh': - 1, # global inhibitory input to HD neurons
'inh_rot': - 1.5, # global inhibitory input to rotation neurons
'every_perc': 1, # store value and show update this often
'avg_err': 10, # segment is s in which to compute current error of network
'n_sigma': 0, # input noise standard deviation
'exc': 4, # global excitation to soma of HD cells
'run': 0, # number of run for many runs of same simulation
'tau_s': 65, # synaptic delay in the network, in ms
't_run': t_run, # real time for which the network is run
'sim_run': sim_run, # designation corresponding to above time
'gain': 1, # neural velocity gain to entrain
'sigma_v': 225, # standard deviation of velocity noise in OU process
'vary_w_rot': False, # whether to add variability in the HD to HR connections
'adj': False, # whether HD neurons project to adjacent HR neurons
'rand_w_rot': False, # whether to randomly generate the HD to HR connections
'filt': True, # whether to filter the learning dynamics
'tau_d': 100, # synaptic plasticity time constant
'x0': 1, # input level for 50% of activation function
'beta': 2.5, # steepness of activation function
'gD': 2, # coupling conductance
'gL': 1, # leak conductance
'fmax': .15, # maximum firing rate in kHz (if saturated)
'eta': 5e-2 # learning rate
}
tau_tot = params['tau_s']/1000 + 0.01
if params['gain']<0:
inv = True
else:
inv = False
# load simulation results
data_path = str(Path(os.getcwd()).parent) + data_dir
filename = "fly_rec" + util.filename(params)
network = np.load(data_path+filename+'.npz',allow_pickle=True)
w = network['w'][:,:,-1]
try:
w_rot = network['w_rot']
except:
w_rot = None
W = network['w']
error = network['err']
stored = []
try:
hd_v = network['hd_v']; neu_v_dark = network['neu_v_dark']
neu_v_day = network['neu_v_day']; stored.append('neu_v')
except:
hd_v = np.nan; neu_v_dark = np.nan; neu_v_day = np.nan
try:
PI_errors = network['PI_errors'];
if not np.all(np.isnan(PI_errors)):
stored.append('PI_err')
except:
PI_errors = np.nan
# Constants
n_neu = np.size(w,0)
bump_size = int(n_neu/12)
dphi = 720/n_neu
theor_lim = 80/tau_tot
actual_lim = 1100
colormap = 'seismic'
fly_max = 500
# Fontsize appropriate for plots
SMALL_SIZE = 8
MEDIUM_SIZE = 10
BIGGER_SIZE = 12
plt.rc('font', size=MEDIUM_SIZE) # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=MEDIUM_SIZE) # fontsize of the figure title
# Visual input
th = np.linspace(-np.pi,np.pi,1000)
theta0 = 0
r = util.vis_in(th,theta0,params['M'],params['sigma'],-params['exc']-1,day=True)
fig, ax = plt.subplots(figsize=(1.5,1))
plt.plot(th*180/np.pi,r,c='dodgerblue',linewidth=2)
plt.ylabel('Visual input')
plt.xlabel('Offset from current HD ($\degree$)')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_position(('data', -190))
ax.spines['bottom'].set_position(('data', -5.2))
ax.xaxis.set_major_locator(MultipleLocator(180))
ax.xaxis.set_minor_locator(MultipleLocator(90))
ax.set_yticks([-1,-3,-5])
ax.yaxis.set_minor_locator(MultipleLocator(1))
plt.xlim([-180,180])
# Vestibular input
vel = np.array([720,0,-360])
col_cycle = cycler('color', ['green','dodgerblue','darkorange'])
th = np.linspace(-180,180,int(n_neu/2))
k = params['v0']/params['v_max']
sign = np.ones(n_neu)
sign[int(n_neu/2):n_neu] = -1
v = k*np.dot(vel[:,np.newaxis],sign[np.newaxis,:])
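# Worked check of the scaling above, using the values in params: a turn at the
# maximum velocity of 720 deg/s yields an input of +v0 = 2 to one half of the HR
# population and -v0 = -2 to the other, while 0 deg/s contributes nothing.
assert np.isclose(np.max(v), params['v0']) and np.isclose(np.min(v), -params['v0'])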
fig, (ax1, ax2) = plt.subplots(1,2,sharex=True,sharey=True,figsize=(3,1.5))
ax1.set_prop_cycle(col_cycle)
ax1.plot(th,v[:,0:int(n_neu/2)].T,linewidth=2)
ax1.set_xlabel('L-HR')
ax1.set_ylabel('Velocity input')
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.spines['left'].set_position(('data', -190))
ax1.spines['bottom'].set_position(('data', -2.2))
ax1.xaxis.set_major_locator(MultipleLocator(180))
ax1.xaxis.set_minor_locator(MultipleLocator(90))
ax1.yaxis.set_major_locator(MultipleLocator(2))
ax1.yaxis.set_minor_locator(MultipleLocator(1))
ax1.set_xlim([-180,180])
ax2.set_prop_cycle(col_cycle)
ax2.plot(th,v[:,int(n_neu/2):].T,linewidth=2)
ax2.set_xlabel('R-HR')
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['left'].set_position(('data', -190))
ax2.spines['bottom'].set_position(('data', -2.2))
fig.text(0.55, 0.04, 'Heading ($\degree$)', ha='center', va='center')
fig.text(0.55, .93, '720 $\degree$/s', ha='right', va='center', fontsize = SMALL_SIZE)
fig.text(0.55, .68, '0 $\degree$/s', ha='right', va='center', fontsize = SMALL_SIZE)
fig.text(0.55, .57, '-360 $\degree$/s', ha='right', va='center', fontsize = SMALL_SIZE)
plt.tight_layout()
# Average error history
t = np.linspace(0,t_run,100)/3600
error_hist = np.mean(error,axis = 0)*1000
fig, ax = plt.subplots(figsize=(3,2))
plt.plot(t,error_hist,color = 'dodgerblue',linewidth=2)
plt.ylabel('Absolute error (spikes/s)')
plt.xlabel('Training time (hours)')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_position(('data', -.4))
ax.spines['bottom'].set_position(('data', -.2))
plt.xlim([0,t[-1]])
plt.ylim([0,np.max(error_hist)])
# Plot weight matrices
w_hr = w[:,0:n_neu]; w_rec = w[:,n_neu:2*n_neu]
ticks = np.array([0,int(n_neu/2),n_neu-1])
ticks = ticks.astype(int) # np.int is deprecated/removed in recent NumPy; plain int behaves the same
r_ticks = [ticks[0]+1,ticks[1]+1,ticks[2]+1]
# Recurrent connections
vmax = np.max(w_rec); vmin = np.min(w_rec)
norm = matplotlib.colors.TwoSlopeNorm(vmin=vmin,vcenter=0,vmax=vmax)
fig, ax = plt.subplots(figsize = (3.5,2.5))
im = ax.imshow(w_rec, cmap = colormap,vmax = vmax, vmin = vmin, norm = norm)
ax.set_title('$W^{rec}$')
ax.set_ylabel('# of postsynaptic neuron \n HD')
ax.set_xlabel('HD \n # of presynaptic neuron')
ax.set_xticks(ticks)
ax.set_xticklabels(r_ticks)
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_minor_locator(AutoMinorLocator())
plt.yticks(ticks,r_ticks)
ax2 = fig.add_axes([.1,ax.get_position().y0,.85,ax.get_position().y1-ax.get_position().y0])
fig.colorbar(im,ticks = [np.ceil(vmin),0,np.floor(vmax)])
plt.axis('off')
fig.text(.95, (ax.get_position().y0+ax.get_position().y1)/2,
'Synaptic strength', ha='center', va='center', rotation='vertical')
# HR to HD connections
vmax = np.max(w_hr); vmin = np.min(w_hr)
norm = matplotlib.colors.TwoSlopeNorm(vmin=vmin,vcenter=0,vmax=vmax)
fig, ax = plt.subplots(figsize = (3.5,2.5))
im = ax.imshow(w_hr, cmap = colormap,vmax = vmax, vmin = vmin, norm = norm)
ax.set_title('$W^{HR}$')
ax.set_ylabel('# of postsynaptic neuron \n HD')
ax.set_xlabel('L-HR R-HR \n # of presynaptic neuron')
ax.set_xticks(ticks)
ax.set_xticklabels(r_ticks)
plt.yticks(ticks,r_ticks)
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax2 = fig.add_axes([.1,ax.get_position().y0,.85,ax.get_position().y1-ax.get_position().y0])
fig.colorbar(im,ticks = [np.ceil(vmin),0,np.floor(vmax)])
plt.axis('off')
fig.text(.95, (ax.get_position().y0+ax.get_position().y1)/2,
'Synaptic strength', ha='center', va='center', rotation='vertical')
# HD to HR connections
if w_rot is not None:
vmax = np.max(w_rot); vmin = np.min(w_rot)
norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
fig, ax = plt.subplots(figsize = (3.5,2.5))
im = ax.imshow(w_rot, cmap = 'Reds',vmax = vmax, vmin = vmin)
ax.set_title('$W^{HD}$')
ax.set_ylabel('# of postsynaptic neuron \nL-HR R-HR')
ax.set_xlabel('HD \n # of presynaptic neuron')
ax.set_xticks(ticks)
ax.set_xticklabels(r_ticks)
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_minor_locator(AutoMinorLocator())
plt.yticks(ticks,r_ticks)
ax2 = fig.add_axes([.1,ax.get_position().y0,.85,ax.get_position().y1-ax.get_position().y0])
fig.colorbar(im,ticks = [np.ceil(vmin),0,np.floor(vmax)])
plt.axis('off')
fig.text(.95, (ax.get_position().y0+ax.get_position().y1)/2,
'Synaptic strength', ha='center', va='center', rotation='vertical')
# Plot weight profile history
percs = np.arange(1,100)
w_rec_hist = np.zeros((n_neu,percs.size)); w_rec_hist_norm = np.zeros((n_neu,percs.size))
w_l_hr_hist = np.zeros((n_neu,percs.size)); w_r_hr_hist = np.zeros((n_neu,percs.size));
w_l_hr_hist_norm = np.zeros((n_neu,percs.size)); w_r_hr_hist_norm = np.zeros((n_neu,percs.size))
for i, perc in enumerate(percs):
# Get weight matrices at specific time in training
temp1 = copy.deepcopy(W[:,n_neu:(2*n_neu),perc])
temp2 = copy.deepcopy(W[:,0:n_neu,perc])
# Average weights over neurons
for j in range(n_neu):
# Displace the rows so that the weight matrices can be averaged across
# receptive field difference
if j%2:
temp1[:,j] = np.roll(temp1[:,j],int(n_neu/2-j+1))
else:
temp1[:,j] = np.roll(temp1[:,j],int(n_neu/2-j))
temp2[:,j] = np.roll(temp2[:,j],int(n_neu/2-2*j))
# Mean and std of weights
hd_slice = np.mean(temp1,axis=1); l_hr_slice = np.mean(temp2[:,0:int(n_neu/2)],axis=1)
r_hr_slice = np.mean(temp2[:,int(n_neu/2):n_neu],axis=1)
hd_sd = np.mean(np.std(temp1,axis=1)); l_hr_sd = np.mean(np.std(temp2[:,0:int(n_neu/2)],axis=1))
r_hr_sd = np.mean(np.std(temp2[:,int(n_neu/2):n_neu],axis=1))
w_rec_hist[:,i] = hd_slice; w_l_hr_hist[:,i] = l_hr_slice
w_r_hr_hist[:,i] = r_hr_slice
# History of recurrent connections
x_ticks_hist = np.round(t_run*np.array([1,10,100])/100/3600,1)
ticks_HD = np.linspace(0,n_neu,5)
ticks_HD_less = np.linspace(0,n_neu,3)
t_ticks_more = [-180,-90,0,90,180]
ticks_HR = np.linspace(0,int(n_neu/2),3)
t_ticks_less = [-180,0,180]
vmax = np.max(w_rec_hist); vmin = np.min(w_rec_hist)
norm = matplotlib.colors.TwoSlopeNorm(vmin=vmin,vcenter=0,vmax=vmax)
fig, ax = plt.subplots(figsize=(8/3,2))
im = ax.imshow(w_rec_hist, cmap = colormap,vmax = vmax, vmin = vmin, norm = norm, aspect='auto')
ax.set_title('$W^{rec}$')
ax.set_ylabel('Receptive field difference ($\degree$)')
ax.set_xlabel('Training time (hours)')
plt.yticks(ticks_HD,t_ticks_more)
ax.set_xscale('log')
ax.set_xlim([1,100])
ax.set_xticks([1,10,100])
ax.set_xticklabels(x_ticks_hist)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_position(('data', 61))
ax2 = fig.add_axes([.1,.1,1,.8])
fig.colorbar(im,ticks = [np.ceil(vmin),0,np.floor(vmax)])
plt.axis('off')
fig.text(1.1, (ax.get_position().y0+ax.get_position().y1)/2,
'Synaptic strength', ha='center', va='center', rotation='vertical')
# History of HR to HD connections
vmax = np.max(np.concatenate((w_l_hr_hist,w_r_hr_hist))); vmin = np.min(np.concatenate((w_l_hr_hist,w_r_hr_hist)))
norm = matplotlib.colors.TwoSlopeNorm(vmin=vmin,vcenter=0,vmax=vmax)
fig = plt.figure(figsize=(8/3,2))
ax1 = plt.subplot(211)
im = ax1.imshow(w_l_hr_hist, cmap = colormap,vmax = vmax, vmin = vmin, norm = norm, aspect='auto')
ax1.set_title('$W^{HR}$')
ax1.set_ylabel('L-HR')
plt.yticks(ticks_HD_less,t_ticks_less)
ax1.set_xscale('log')
ax1.set_xlim([1,100])
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.spines['bottom'].set_position(('data', 62))
ax1.set_xticklabels([])
ax1.yaxis.set_minor_locator(MultipleLocator(15))
ax2 = plt.subplot(212)
im = ax2.imshow(w_r_hr_hist, cmap = colormap,vmax = vmax, vmin = vmin, norm = norm, aspect='auto')
ax2.set_xlabel('Training time (hours)')
ax2.set_ylabel('R-HR')
plt.yticks(ticks_HD_less,t_ticks_less)
ax2.yaxis.set_minor_locator(MultipleLocator(15))
ax2.set_xscale('log')
ax2.set_xlim([1,100])
ax2.set_xticks([1,10,100])
ax2.set_xticklabels(x_ticks_hist)
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['bottom'].set_position(('data', 62))
plt.subplots_adjust(hspace=0.25)
ax3 = fig.add_axes([.1,.1,1,.8])
fig.colorbar(im,ticks = [np.ceil(vmin),0,np.floor(vmax)])
"""
These tools are here because I needed a very quick test of relatively computationally expensive routines that will get ported to C++ when I have time to actually compile things.
best,
Boris
"""
import numpy as np
import numba as nb
import math
M_PI = np.pi
# @nb.jit(nopython = True)
# def thin_perimeter_for_salesman(X,Y,distance_thinning):
# new_index = np.zeros(X.shape[0])
# return new_index
@nb.jit(nopython = True)
def travelling_salesman_algortihm(X,Y):
is_visited = np.zeros(X.shape[0])
temp_distance = np.zeros(X.shape[0])
new_index = np.zeros(X.shape[0], dtype = np.int32)
new_index[0] = 0
is_visited[0] = 1
that_index = 0
for i in range(1,new_index.shape[0]):
that_x = X[that_index]
that_y = Y[that_index]
min_distance = 1e32
min_id = -9999
#compute distance
temp_distance[that_index] = min_distance
for j in range(new_index.shape[0]):
if(j!= that_index):
temp_distance[j] = math.sqrt(abs(X[j] - that_x)**2 + abs(Y[j] - that_y) **2)
if(temp_distance[j] < min_distance and is_visited[j] == 0):
min_distance = temp_distance[j]
min_id = j
new_index[i] = min_id
that_index = min_id
is_visited[that_index] = 1
return new_index
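# Minimal usage sketch with made-up points, wrapped in a helper so it does not run
# on import: starting from index 0, the greedy routine above repeatedly hops to the
# nearest unvisited neighbour.
def _demo_travelling_salesman():
    x = np.array([0.0, 3.0, 1.0, 2.0])
    y = np.array([0.0, 0.0, 0.0, 0.0])
    return travelling_salesman_algortihm(x, y) # expected visiting order: [0, 2, 3, 1]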
@nb.jit(nopython = True)
def remove_outliers_in_drainage_divide(Dx, Dy, threshold):
indices = np.full(Dx.shape[0], True)
is_outlier = False
last = 0
for i in range(Dx.shape[0]):
if(Dx[i]>threshold and Dy[i]>threshold):
is_outlier = True
if(is_outlier):
indices[i] = False
return indices
@nb.jit(nopython=False, parallel = True)
def average_from_grid_n_stuff_maybe_adaptative_and_all(X,Y,Z,window):
"""
    Take X, Y, Z and a window size. Return an array of values resampled over
    windows of that size.
    Not sure how it exactly works.
B.G.
"""
# First getting the minimum and maximum
Xminimum = np.min(X)
Yminimum = np.min(Y)
Xmaximum = np.max(X)
    Ymaximum = np.max(Y)
# Copyright 2019, <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import math
import numpy as np
import nibabel as nib
import cv2
import vtk
import pandas as pd
import matplotlib.pyplot as plt
from vtk.util import numpy_support
from scipy import interpolate
import skimage
from ukbb_cardiac.common.image_utils import *
def approximate_contour(contour, factor=4, smooth=0.05, periodic=False):
""" Approximate a contour.
contour: input contour
factor: upsampling factor for the contour
smooth: smoothing factor for controling the number of spline knots.
Number of knots will be increased until the smoothing
condition is satisfied:
sum((w[i] * (y[i]-spl(x[i])))**2, axis=0) <= s
which means the larger s is, the fewer knots will be used,
thus the contour will be smoother but also deviating more
from the input contour.
periodic: set to True if this is a closed contour, otherwise False.
return the upsampled and smoothed contour
"""
# The input contour
N = len(contour)
dt = 1.0 / N
t = np.arange(N) * dt
x = contour[:, 0]
y = contour[:, 1]
# Pad the contour before approximation to avoid underestimating
# the values at the end points
r = int(0.5 * N)
t_pad = np.concatenate((np.arange(-r, 0) * dt, t, 1 + np.arange(0, r) * dt))
if periodic:
x_pad = np.concatenate((x[-r:], x, x[:r]))
y_pad = np.concatenate((y[-r:], y, y[:r]))
else:
x_pad = np.concatenate((np.repeat(x[0], repeats=r), x, np.repeat(x[-1], repeats=r)))
y_pad = np.concatenate((np.repeat(y[0], repeats=r), y, np.repeat(y[-1], repeats=r)))
# Fit the contour with splines with a smoothness constraint
fx = interpolate.UnivariateSpline(t_pad, x_pad, s=smooth * len(t_pad))
fy = interpolate.UnivariateSpline(t_pad, y_pad, s=smooth * len(t_pad))
# Evaluate the new contour
N2 = N * factor
dt2 = 1.0 / N2
t2 = np.arange(N2) * dt2
x2, y2 = fx(t2), fy(t2)
contour2 = np.stack((x2, y2), axis=1)
return contour2
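# Illustrative usage of the smoothing above on synthetic data (not part of the
# original pipeline); all values here are made up.
def _demo_approximate_contour():
    t = np.linspace(0, 2 * np.pi, 50, endpoint=False)
    noisy = np.stack((np.cos(t), np.sin(t)), axis=1) \
        + np.random.normal(scale=0.02, size=(50, 2))
    # Upsample by a factor of 4 and smooth; periodic=True because the contour is closed
    return approximate_contour(noisy, factor=4, smooth=0.05, periodic=True)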
def sa_pass_quality_control(seg_sa_name):
""" Quality control for short-axis image segmentation """
nim = nib.load(seg_sa_name)
seg_sa = nim.get_data()
X, Y, Z = seg_sa.shape[:3]
# Label class in the segmentation
label = {'LV': 1, 'Myo': 2, 'RV': 3}
# Criterion 1: every class exists and the area is above a threshold
# Count number of pixels in 3D
for l_name, l in label.items():
pixel_thres = 10
if np.sum(seg_sa == l) < pixel_thres:
print('{0}: The segmentation for class {1} is smaller than {2} pixels. '
'It does not pass the quality control.'.format(seg_sa_name, l_name, pixel_thres))
return False
# Criterion 2: number of slices with LV segmentations is above a threshold
# and there is no missing segmentation in between the slices
z_pos = []
for z in range(Z):
seg_z = seg_sa[:, :, z]
endo = (seg_z == label['LV']).astype(np.uint8)
myo = (seg_z == label['Myo']).astype(np.uint8)
pixel_thres = 10
if (np.sum(endo) < pixel_thres) or (np.sum(myo) < pixel_thres):
continue
z_pos += [z]
n_slice = len(z_pos)
slice_thres = 6
if n_slice < slice_thres:
print('{0}: The segmentation has less than {1} slices. '
'It does not pass the quality control.'.format(seg_sa_name, slice_thres))
return False
if n_slice != (np.max(z_pos) - np.min(z_pos) + 1):
print('{0}: There is missing segmentation between the slices. '
'It does not pass the quality control.'.format(seg_sa_name))
return False
# Criterion 3: LV and RV exists on the mid-cavity slice
_, _, cz = [np.mean(x) for x in np.nonzero(seg_sa == label['LV'])]
z = int(round(cz))
seg_z = seg_sa[:, :, z]
endo = (seg_z == label['LV']).astype(np.uint8)
endo = get_largest_cc(endo).astype(np.uint8)
myo = (seg_z == label['Myo']).astype(np.uint8)
myo = remove_small_cc(myo).astype(np.uint8)
epi = (endo | myo).astype(np.uint8)
epi = get_largest_cc(epi).astype(np.uint8)
rv = (seg_z == label['RV']).astype(np.uint8)
rv = get_largest_cc(rv).astype(np.uint8)
pixel_thres = 10
if np.sum(epi) < pixel_thres or np.sum(rv) < pixel_thres:
print('{0}: Can not find LV epi or RV to determine the AHA '
'coordinate system.'.format(seg_sa_name))
return False
return True
def la_pass_quality_control(seg_la_name):
""" Quality control for long-axis image segmentation """
nim = nib.load(seg_la_name)
seg = nim.get_data()
X, Y, Z = seg.shape[:3]
seg_z = seg[:, :, 0]
# Label class in the segmentation
label = {'LV': 1, 'Myo': 2, 'RV': 3, 'LA': 4, 'RA': 5}
for l_name, l in label.items():
# Criterion 1: every class exists and the area is above a threshold
pixel_thres = 10
if np.sum(seg_z == l) < pixel_thres:
print('{0}: The segmentation for class {1} is smaller than {2} pixels. '
'It does not pass the quality control.'.format(seg_la_name, l_name, pixel_thres))
return False
# Criterion 2: the area is above a threshold after connected component analysis
endo = (seg_z == label['LV']).astype(np.uint8)
endo = get_largest_cc(endo).astype(np.uint8)
myo = (seg_z == label['Myo']).astype(np.uint8)
myo = remove_small_cc(myo).astype(np.uint8)
epi = (endo | myo).astype(np.uint8)
epi = get_largest_cc(epi).astype(np.uint8)
pixel_thres = 10
if np.sum(endo) < pixel_thres or np.sum(myo) < pixel_thres or np.sum(epi) < pixel_thres:
print('{0}: Can not find LV endo, myo or epi to extract the long-axis '
'myocardial contour.'.format(seg_la_name))
return False
return True
def determine_aha_coordinate_system(seg_sa, affine_sa):
""" Determine the AHA coordinate system using the mid-cavity slice
of the short-axis image segmentation.
"""
# Label class in the segmentation
label = {'BG': 0, 'LV': 1, 'Myo': 2, 'RV': 3}
# Find the mid-cavity slice
_, _, cz = [np.mean(x) for x in np.nonzero(seg_sa == label['LV'])]
z = int(round(cz))
seg_z = seg_sa[:, :, z]
endo = (seg_z == label['LV']).astype(np.uint8)
endo = get_largest_cc(endo).astype(np.uint8)
myo = (seg_z == label['Myo']).astype(np.uint8)
myo = remove_small_cc(myo).astype(np.uint8)
epi = (endo | myo).astype(np.uint8)
epi = get_largest_cc(epi).astype(np.uint8)
rv = (seg_z == label['RV']).astype(np.uint8)
rv = get_largest_cc(rv).astype(np.uint8)
# Extract epicardial contour
_, contours, _ = cv2.findContours(cv2.inRange(epi, 1, 1), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
epi_contour = contours[0][:, 0, :]
# Find the septum, which is the intersection between LV and RV
septum = []
dilate_iter = 1
while len(septum) == 0:
# Dilate the RV till it intersects with LV epicardium.
# Normally, this is fulfilled after just one iteration.
rv_dilate = cv2.dilate(rv, np.ones((3, 3), dtype=np.uint8), iterations=dilate_iter)
dilate_iter += 1
for y, x in epi_contour:
if rv_dilate[x, y] == 1:
septum += [[x, y]]
# The middle of the septum
mx, my = septum[int(round(0.5 * len(septum)))]
point_septum = np.dot(affine_sa, np.array([mx, my, z, 1]))[:3]
# Find the centre of the LV cavity
cx, cy = [np.mean(x) for x in np.nonzero(endo)]
point_cavity = np.dot(affine_sa, np.array([cx, cy, z, 1]))[:3]
# Determine the AHA coordinate system
axis = {}
axis['lv_to_sep'] = point_septum - point_cavity
axis['lv_to_sep'] /= np.linalg.norm(axis['lv_to_sep'])
axis['apex_to_base'] = np.copy(affine_sa[:3, 2])
axis['apex_to_base'] /= np.linalg.norm(axis['apex_to_base'])
if axis['apex_to_base'][2] < 0:
axis['apex_to_base'] *= -1
axis['inf_to_ant'] = np.cross(axis['apex_to_base'], axis['lv_to_sep'])
return axis
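# Illustrative sketch with made-up points (not a replacement for the function above),
# mirroring the core of the axis construction: the AHA coordinate system follows from
# the cavity centre, a septum point and the through-plane direction. The sign flip and
# normalisation of apex_to_base are omitted here for brevity.
def _demo_aha_axes():
    point_cavity = np.array([0.0, 0.0, 0.0])
    point_septum = np.array([10.0, 0.0, 0.0])
    apex_to_base = np.array([0.0, 0.0, 1.0])
    lv_to_sep = point_septum - point_cavity
    lv_to_sep /= np.linalg.norm(lv_to_sep)
    inf_to_ant = np.cross(apex_to_base, lv_to_sep)
    return {'lv_to_sep': lv_to_sep, 'apex_to_base': apex_to_base, 'inf_to_ant': inf_to_ant}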
def determine_aha_part(seg_sa, affine_sa, three_slices=False):
""" Determine the AHA part for each slice. """
# Label class in the segmentation
label = {'BG': 0, 'LV': 1, 'Myo': 2, 'RV': 3}
# Sort the z-axis positions of the slices with both endo and epicardium
# segmentations
X, Y, Z = seg_sa.shape[:3]
z_pos = []
for z in range(Z):
seg_z = seg_sa[:, :, z]
endo = (seg_z == label['LV']).astype(np.uint8)
myo = (seg_z == label['Myo']).astype(np.uint8)
pixel_thres = 10
if (np.sum(endo) < pixel_thres) or (np.sum(myo) < pixel_thres):
continue
z_pos += [(z, np.dot(affine_sa, np.array([X / 2.0, Y / 2.0, z, 1]))[2])]
z_pos = sorted(z_pos, key=lambda x: -x[1])
# Divide the slices into three parts: basal, mid-cavity and apical
n_slice = len(z_pos)
part_z = {}
if three_slices:
# Select three slices (basal, mid and apical) for strain analysis, inspired by:
#
# [1] <NAME>, et al. Myocardial strain measurement with
# feature-tracking cardiovascular magnetic resonance: normal values.
# European Heart Journal - Cardiovascular Imaging, (2015) 16, 871-881.
#
# [2] <NAME>, et al. Cardiovascular magnetic resonance feature-
# tracking assessment of myocardial mechanics: Intervendor agreement
# and considerations regarding reproducibility. Clinical Radiology
# 70 (2015), 989-998.
# Use the slice at 25% location from base to apex.
# Avoid using the first one or two basal slices, as the myocardium
# will move out of plane at ES due to longitudinal motion, which will
# be a problem for 2D in-plane motion tracking.
z = int(round((n_slice - 1) * 0.25))
part_z[z_pos[z][0]] = 'basal'
# Use the central slice.
z = int(round((n_slice - 1) * 0.5))
part_z[z_pos[z][0]] = 'mid'
# Use the slice at 75% location from base to apex.
# In the most apical slices, the myocardium looks blurry and
# may not be suitable for motion tracking.
z = int(round((n_slice - 1) * 0.75))
part_z[z_pos[z][0]] = 'apical'
else:
# Use all the slices
i1 = int(math.ceil(n_slice / 3.0))
i2 = int(math.ceil(2 * n_slice / 3.0))
i3 = n_slice
for i in range(0, i1):
part_z[z_pos[i][0]] = 'basal'
for i in range(i1, i2):
part_z[z_pos[i][0]] = 'mid'
for i in range(i2, i3):
part_z[z_pos[i][0]] = 'apical'
return part_z
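# Worked example of the three-slice selection above with a made-up stack of 9
# usable slices: 25%, 50% and 75% of (9 - 1) give slice positions 2, 4 and 6.
def _demo_aha_part_slices(n_slice=9):
    return [int(round((n_slice - 1) * f)) for f in (0.25, 0.5, 0.75)]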
def determine_aha_segment_id(point, lv_centre, aha_axis, part):
""" Determine the AHA segment ID given a point,
the LV cavity center and the coordinate system.
"""
d = point - lv_centre
x = np.dot(d, aha_axis['inf_to_ant'])
y = np.dot(d, aha_axis['lv_to_sep'])
deg = math.degrees(math.atan2(y, x))
seg_id = 0
if part == 'basal':
if (deg >= -30) and (deg < 30):
seg_id = 1
elif (deg >= 30) and (deg < 90):
seg_id = 2
elif (deg >= 90) and (deg < 150):
seg_id = 3
elif (deg >= 150) or (deg < -150):
seg_id = 4
elif (deg >= -150) and (deg < -90):
seg_id = 5
elif (deg >= -90) and (deg < -30):
seg_id = 6
else:
print('Error: wrong degree {0}!'.format(deg))
exit(0)
elif part == 'mid':
if (deg >= -30) and (deg < 30):
seg_id = 7
elif (deg >= 30) and (deg < 90):
seg_id = 8
elif (deg >= 90) and (deg < 150):
seg_id = 9
elif (deg >= 150) or (deg < -150):
seg_id = 10
elif (deg >= -150) and (deg < -90):
seg_id = 11
elif (deg >= -90) and (deg < -30):
seg_id = 12
else:
print('Error: wrong degree {0}!'.format(deg))
exit(0)
elif part == 'apical':
if (deg >= -45) and (deg < 45):
seg_id = 13
elif (deg >= 45) and (deg < 135):
seg_id = 14
elif (deg >= 135) or (deg < -135):
seg_id = 15
elif (deg >= -135) and (deg < -45):
seg_id = 16
else:
print('Error: wrong degree {0}!'.format(deg))
exit(0)
elif part == 'apex':
seg_id = 17
else:
print('Error: unknown part {0}!'.format(part))
exit(0)
return seg_id
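# Worked example with made-up geometry: with the septum along +x and the
# inferior-to-anterior axis along +y, a basal point displaced along +y lies at
# 0 degrees and therefore falls in AHA segment 1.
def _demo_aha_segment_id():
    aha_axis = {'lv_to_sep': np.array([1.0, 0.0, 0.0]),
                'apex_to_base': np.array([0.0, 0.0, 1.0]),
                'inf_to_ant': np.array([0.0, 1.0, 0.0])}
    lv_centre = np.zeros(3)
    return determine_aha_segment_id(lv_centre + aha_axis['inf_to_ant'],
                                    lv_centre, aha_axis, 'basal') # returns 1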
def evaluate_wall_thickness(seg_name, output_name_stem, part=None):
""" Evaluate myocardial wall thickness. """
# Read the segmentation image
nim = nib.load(seg_name)
Z = nim.header['dim'][3]
affine = nim.affine
seg = nim.get_data()
# Label class in the segmentation
label = {'BG': 0, 'LV': 1, 'Myo': 2, 'RV': 3}
# Determine the AHA coordinate system using the mid-cavity slice
aha_axis = determine_aha_coordinate_system(seg, affine)
# Determine the AHA part of each slice
part_z = {}
if not part:
part_z = determine_aha_part(seg, affine)
else:
part_z = {z: part for z in range(Z)}
# Construct the points set to represent the endocardial contours
endo_points = vtk.vtkPoints()
thickness = vtk.vtkDoubleArray()
thickness.SetName('Thickness')
points_aha = vtk.vtkIntArray()
points_aha.SetName('Segment ID')
point_id = 0
lines = vtk.vtkCellArray()
# Save epicardial contour for debug and demonstration purposes
save_epi_contour = False
if save_epi_contour:
epi_points = vtk.vtkPoints()
points_epi_aha = vtk.vtkIntArray()
points_epi_aha.SetName('Segment ID')
point_epi_id = 0
lines_epi = vtk.vtkCellArray()
# For each slice
for z in range(Z):
# Check whether there is endocardial segmentation and it is not too small,
# e.g. a single pixel, which either means the structure is missing or
# causes problem in contour interpolation.
seg_z = seg[:, :, z]
endo = (seg_z == label['LV']).astype(np.uint8)
endo = get_largest_cc(endo).astype(np.uint8)
myo = (seg_z == label['Myo']).astype(np.uint8)
myo = remove_small_cc(myo).astype(np.uint8)
epi = (endo | myo).astype(np.uint8)
epi = get_largest_cc(epi).astype(np.uint8)
pixel_thres = 10
if (np.sum(endo) < pixel_thres) or (np.sum(myo) < pixel_thres):
continue
# Calculate the centre of the LV cavity
# Get the largest component in case we have a bad segmentation
cx, cy = [np.mean(x) for x in np.nonzero(endo)]
lv_centre = np.dot(affine, np.array([cx, cy, z, 1]))[:3]
# Extract endocardial contour
# Note: cv2 considers an input image as a Y x X array, which is different
# from nibabel which assumes a X x Y array.
_, contours, _ = cv2.findContours(cv2.inRange(endo, 1, 1), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
endo_contour = contours[0][:, 0, :]
# Extract epicardial contour
_, contours, _ = cv2.findContours(cv2.inRange(epi, 1, 1), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
epi_contour = contours[0][:, 0, :]
# Smooth the contours
endo_contour = approximate_contour(endo_contour, periodic=True)
epi_contour = approximate_contour(epi_contour, periodic=True)
# A polydata representation of the epicardial contour
epi_points_z = vtk.vtkPoints()
for y, x in epi_contour:
p = np.dot(affine, np.array([x, y, z, 1]))[:3]
epi_points_z.InsertNextPoint(p)
epi_poly_z = vtk.vtkPolyData()
epi_poly_z.SetPoints(epi_points_z)
# Point locator for the epicardial contour
locator = vtk.vtkPointLocator()
locator.SetDataSet(epi_poly_z)
locator.BuildLocator()
# For each point on endocardium, find the closest point on epicardium
N = endo_contour.shape[0]
for i in range(N):
y, x = endo_contour[i]
# The world coordinate of this point
p = np.dot(affine, np.array([x, y, z, 1]))[:3]
endo_points.InsertNextPoint(p)
# The closest epicardial point
q = np.array(epi_points_z.GetPoint(locator.FindClosestPoint(p)))
# The distance from endo to epi
dist_pq = np.linalg.norm(q - p)
# Add the point data
thickness.InsertNextTuple1(dist_pq)
seg_id = determine_aha_segment_id(p, lv_centre, aha_axis, part_z[z])
points_aha.InsertNextTuple1(seg_id)
# Record the first point of the current contour
if i == 0:
contour_start_id = point_id
# Add the line
if i == (N - 1):
lines.InsertNextCell(2, [point_id, contour_start_id])
else:
lines.InsertNextCell(2, [point_id, point_id + 1])
# Increment the point index
point_id += 1
if save_epi_contour:
# For each point on epicardium
N = epi_contour.shape[0]
for i in range(N):
y, x = epi_contour[i]
# The world coordinate of this point
p = np.dot(affine, np.array([x, y, z, 1]))[:3]
epi_points.InsertNextPoint(p)
seg_id = determine_aha_segment_id(p, lv_centre, aha_axis, part_z[z])
points_epi_aha.InsertNextTuple1(seg_id)
# Record the first point of the current contour
if i == 0:
contour_start_id = point_epi_id
# Add the line
if i == (N - 1):
lines_epi.InsertNextCell(2, [point_epi_id, contour_start_id])
else:
lines_epi.InsertNextCell(2, [point_epi_id, point_epi_id + 1])
# Increment the point index
point_epi_id += 1
# Save to a vtk file
endo_poly = vtk.vtkPolyData()
endo_poly.SetPoints(endo_points)
endo_poly.GetPointData().AddArray(thickness)
endo_poly.GetPointData().AddArray(points_aha)
endo_poly.SetLines(lines)
writer = vtk.vtkPolyDataWriter()
output_name = '{0}.vtk'.format(output_name_stem)
writer.SetFileName(output_name)
writer.SetInputData(endo_poly)
writer.Write()
if save_epi_contour:
epi_poly = vtk.vtkPolyData()
epi_poly.SetPoints(epi_points)
epi_poly.GetPointData().AddArray(points_epi_aha)
epi_poly.SetLines(lines_epi)
writer = vtk.vtkPolyDataWriter()
output_name = '{0}_epi.vtk'.format(output_name_stem)
writer.SetFileName(output_name)
writer.SetInputData(epi_poly)
writer.Write()
# Evaluate the wall thickness per AHA segment and save to a csv file
table_thickness = np.zeros(17)
np_thickness = numpy_support.vtk_to_numpy(thickness).astype(np.float32)
np_points_aha = numpy_support.vtk_to_numpy(points_aha).astype(np.int8)
for i in range(16):
table_thickness[i] = np.mean(np_thickness[np_points_aha == (i + 1)])
table_thickness[-1] = np.mean(np_thickness)
index = [str(x) for x in np.arange(1, 17)] + ['Global']
df = pd.DataFrame(table_thickness, index=index, columns=['Thickness'])
df.to_csv('{0}.csv'.format(output_name_stem))
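# Illustrative sketch of the thickness measure used above, with made-up points:
# the wall thickness at an endocardial point is its distance to the closest
# epicardial point.
def _demo_point_to_contour_thickness():
    endo_point = np.array([10.0, 0.0, 0.0])
    epi_points = np.array([[12.0, 0.0, 0.0], [0.0, 13.0, 0.0]])
    return np.min(np.linalg.norm(epi_points - endo_point, axis=1)) # -> 2.0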
def extract_myocardial_contour(seg_name, contour_name_stem, part=None, three_slices=False):
""" Extract the myocardial contours, including both endo and epicardial contours.
Determine the AHA segment ID for all the contour points.
By default, part is None. This function will automatically determine the part
for each slice (basal, mid or apical).
If part is given, this function will use the given part for the image slice.
"""
# Read the segmentation image
nim = nib.load(seg_name)
X, Y, Z = nim.header['dim'][1:4]
affine = nim.affine
seg = nim.get_data()
# Label class in the segmentation
label = {'BG': 0, 'LV': 1, 'Myo': 2, 'RV': 3}
# Determine the AHA coordinate system using the mid-cavity slice
aha_axis = determine_aha_coordinate_system(seg, affine)
# Determine the AHA part of each slice
part_z = {}
if not part:
part_z = determine_aha_part(seg, affine, three_slices=three_slices)
else:
part_z = {z: part for z in range(Z)}
# For each slice
for z in range(Z):
# Check whether there is the endocardial segmentation
seg_z = seg[:, :, z]
endo = (seg_z == label['LV']).astype(np.uint8)
endo = get_largest_cc(endo).astype(np.uint8)
myo = (seg_z == label['Myo']).astype(np.uint8)
myo = remove_small_cc(myo).astype(np.uint8)
epi = (endo | myo).astype(np.uint8)
epi = get_largest_cc(epi).astype(np.uint8)
pixel_thres = 10
if (np.sum(endo) < pixel_thres) or (np.sum(myo) < pixel_thres):
continue
# Check whether this slice is going to be analysed
if z not in part_z.keys():
continue
# Construct the points set and data arrays to represent both endo and epicardial contours
points = vtk.vtkPoints()
points_radial = vtk.vtkFloatArray()
points_radial.SetName('Direction_Radial')
points_radial.SetNumberOfComponents(3)
points_label = vtk.vtkIntArray()
points_label.SetName('Label')
points_aha = vtk.vtkIntArray()
points_aha.SetName('Segment ID')
point_id = 0
lines = vtk.vtkCellArray()
lines_aha = vtk.vtkIntArray()
lines_aha.SetName('Segment ID')
lines_dir = vtk.vtkIntArray()
lines_dir.SetName('Direction ID')
# Calculate the centre of the LV cavity
# Get the largest component in case we have a bad segmentation
cx, cy = [np.mean(x) for x in np.nonzero(endo)]
lv_centre = np.dot(affine, np.array([cx, cy, z, 1]))[:3]
# Extract epicardial contour
_, contours, _ = cv2.findContours(cv2.inRange(epi, 1, 1), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
epi_contour = contours[0][:, 0, :]
epi_contour = approximate_contour(epi_contour, periodic=True)
N = epi_contour.shape[0]
for i in range(N):
y, x = epi_contour[i]
# The world coordinate of this point
p = np.dot(affine, np.array([x, y, z, 1]))[:3]
points.InsertNextPoint(p[0], p[1], p[2])
# The radial direction from the cavity centre to this point
d_rad = p - lv_centre
d_rad = d_rad / np.linalg.norm(d_rad)
points_radial.InsertNextTuple3(d_rad[0], d_rad[1], d_rad[2])
# Record the type of the point (1 = endo, 2 = epi)
points_label.InsertNextTuple1(2)
# Record the AHA segment ID
seg_id = determine_aha_segment_id(p, lv_centre, aha_axis, part_z[z])
points_aha.InsertNextTuple1(seg_id)
# Record the first point of the current contour
if i == 0:
contour_start_id = point_id
# Add the circumferential line
if i == (N - 1):
lines.InsertNextCell(2, [point_id, contour_start_id])
else:
lines.InsertNextCell(2, [point_id, point_id + 1])
lines_aha.InsertNextTuple1(seg_id)
# Line direction (1 = radial, 2 = circumferential, 3 = longitudinal)
lines_dir.InsertNextTuple1(2)
# Increment the point index
point_id += 1
# Point locator
epi_points = vtk.vtkPoints()
epi_points.DeepCopy(points)
epi_poly = vtk.vtkPolyData()
epi_poly.SetPoints(epi_points)
locator = vtk.vtkPointLocator()
locator.SetDataSet(epi_poly)
locator.BuildLocator()
# Extract endocardial contour
# Note: cv2 considers an input image as a Y x X array, which is different
# from nibabel which assumes a X x Y array.
_, contours, _ = cv2.findContours(cv2.inRange(endo, 1, 1), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
endo_contour = contours[0][:, 0, :]
endo_contour = approximate_contour(endo_contour, periodic=True)
N = endo_contour.shape[0]
for i in range(N):
y, x = endo_contour[i]
# The world coordinate of this point
p = np.dot(affine, np.array([x, y, z, 1]))[:3]
points.InsertNextPoint(p[0], p[1], p[2])
# The radial direction from the cavity centre to this point
d_rad = p - lv_centre
d_rad = d_rad / np.linalg.norm(d_rad)
points_radial.InsertNextTuple3(d_rad[0], d_rad[1], d_rad[2])
# Record the type of the point (1 = endo, 2 = epi)
points_label.InsertNextTuple1(1)
# Record the AHA segment ID
seg_id = determine_aha_segment_id(p, lv_centre, aha_axis, part_z[z])
points_aha.InsertNextTuple1(seg_id)
# Record the first point of the current contour
if i == 0:
contour_start_id = point_id
# Add the circumferential line
if i == (N - 1):
lines.InsertNextCell(2, [point_id, contour_start_id])
else:
lines.InsertNextCell(2, [point_id, point_id + 1])
lines_aha.InsertNextTuple1(seg_id)
# Line direction (1 = radial, 2 = circumferential, 3 = longitudinal)
lines_dir.InsertNextTuple1(2)
# Add the radial line for every few points
n_radial = 36
M = int(round(N / float(n_radial)))
if i % M == 0:
# The closest epicardial points
ids = vtk.vtkIdList()
n_ids = 10
locator.FindClosestNPoints(n_ids, p, ids)
# The point that aligns with the radial direction
val = []
for j in range(n_ids):
q = epi_points.GetPoint(ids.GetId(j))
d = (q - lv_centre) / np.linalg.norm(q - lv_centre)
val += [np.dot(d, d_rad)]
val = np.array(val)
epi_point_id = ids.GetId(np.argmax(val))
# Add the radial line
lines.InsertNextCell(2, [point_id, epi_point_id])
lines_aha.InsertNextTuple1(seg_id)
# Line direction (1 = radial, 2 = circumferential, 3 = longitudinal)
lines_dir.InsertNextTuple1(1)
# Increment the point index
point_id += 1
# Save the contour for each slice
poly = vtk.vtkPolyData()
poly.SetPoints(points)
poly.GetPointData().AddArray(points_label)
poly.GetPointData().AddArray(points_aha)
poly.GetPointData().AddArray(points_radial)
poly.SetLines(lines)
poly.GetCellData().AddArray(lines_aha)
poly.GetCellData().AddArray(lines_dir)
writer = vtk.vtkPolyDataWriter()
contour_name = '{0}{1:02d}.vtk'.format(contour_name_stem, z)
writer.SetFileName(contour_name)
writer.SetInputData(poly)
writer.Write()
os.system('sed -i "1s/4.1/4.0/" {0}'.format(contour_name))
def evaluate_strain_by_length(contour_name_stem, T, dt, output_name_stem):
""" Calculate the strain based on the line length """
# Read the polydata at the first time frame (ED frame)
fr = 0
reader = vtk.vtkPolyDataReader()
reader.SetFileName('{0}{1:02d}.vtk'.format(contour_name_stem, fr))
reader.Update()
poly = reader.GetOutput()
points = poly.GetPoints()
# Calculate the length of each line
lines = poly.GetLines()
lines_aha = poly.GetCellData().GetArray('Segment ID')
lines_dir = poly.GetCellData().GetArray('Direction ID')
n_lines = lines.GetNumberOfCells()
length_ED = np.zeros(n_lines)
seg_id = np.zeros(n_lines)
dir_id = np.zeros(n_lines)
lines.InitTraversal()
for i in range(n_lines):
ids = vtk.vtkIdList()
lines.GetNextCell(ids)
p1 = np.array(points.GetPoint(ids.GetId(0)))
p2 = np.array(points.GetPoint(ids.GetId(1)))
d = np.linalg.norm(p1 - p2)
seg_id[i] = lines_aha.GetValue(i)
dir_id[i] = lines_dir.GetValue(i)
length_ED[i] = d
# For each time frame, calculate the strain, i.e. change of length
table_strain = {}
table_strain['radial'] = np.zeros((17, T))
table_strain['circum'] = np.zeros((17, T))
for fr in range(0, T):
# Read the polydata
reader = vtk.vtkPolyDataReader()
filename = '{0}{1:02d}.vtk'.format(contour_name_stem, fr)
reader.SetFileName(filename)
reader.Update()
poly = reader.GetOutput()
points = poly.GetPoints()
# Calculate the strain for each line
lines = poly.GetLines()
n_lines = lines.GetNumberOfCells()
strain = np.zeros(n_lines)
vtk_strain = vtk.vtkFloatArray()
vtk_strain.SetName('Strain')
lines.InitTraversal()
for i in range(n_lines):
ids = vtk.vtkIdList()
lines.GetNextCell(ids)
p1 = np.array(points.GetPoint(ids.GetId(0)))
p2 = np.array(points.GetPoint(ids.GetId(1)))
d = np.linalg.norm(p1 - p2)
# Strain of this line (unit: %)
strain[i] = (d - length_ED[i]) / length_ED[i] * 100
vtk_strain.InsertNextTuple1(strain[i])
# Save the strain array to the vtk file
poly.GetCellData().AddArray(vtk_strain)
writer = vtk.vtkPolyDataWriter()
writer.SetInputData(poly)
writer.SetFileName(filename)
writer.Write()
os.system('sed -i "1s/4.1/4.0/" {0}'.format(filename))
# Calculate the segmental and global strains
for i in range(0, 16):
table_strain['radial'][i, fr] = np.mean(strain[(seg_id == (i + 1)) & (dir_id == 1)])
table_strain['circum'][i, fr] = np.mean(strain[(seg_id == (i + 1)) & (dir_id == 2)])
table_strain['radial'][-1, fr] = np.mean(strain[dir_id == 1])
table_strain['circum'][-1, fr] = np.mean(strain[dir_id == 2])
for c in ['radial', 'circum']:
# Save into csv files
index = [str(x) for x in np.arange(1, 17)] + ['Global']
column = np.arange(0, T) * dt * 1e3
df = pd.DataFrame(table_strain[c], index=index, columns=column)
df.to_csv('{0}_{1}.csv'.format(output_name_stem, c))
def cine_2d_sa_motion_and_strain_analysis(data_dir, par_dir, output_dir, output_name_stem):
""" Perform motion tracking and strain analysis for cine MR images. """
# Crop the image to save computation for image registration
# Focus on the left ventricle so that motion tracking is less affected by
# the movement of RV and LV outflow tract
padding('{0}/seg_sa_ED.nii.gz'.format(data_dir),
'{0}/seg_sa_ED.nii.gz'.format(data_dir),
'{0}/seg_sa_lv_ED.nii.gz'.format(output_dir), 3, 0)
auto_crop_image('{0}/seg_sa_lv_ED.nii.gz'.format(output_dir),
'{0}/seg_sa_lv_crop_ED.nii.gz'.format(output_dir), 20)
os.system('mirtk transform-image {0}/sa.nii.gz {1}/sa_crop.nii.gz '
'-target {1}/seg_sa_lv_crop_ED.nii.gz'.format(data_dir, output_dir))
os.system('mirtk transform-image {0}/seg_sa.nii.gz {1}/seg_sa_crop.nii.gz '
'-target {1}/seg_sa_lv_crop_ED.nii.gz'.format(data_dir, output_dir))
# Extract the myocardial contours for three slices, respectively basal, mid-cavity and apical
extract_myocardial_contour('{0}/seg_sa_ED.nii.gz'.format(data_dir),
'{0}/myo_contour_ED_z'.format(output_dir),
three_slices=True)
# Split the volume into slices
split_volume('{0}/sa_crop.nii.gz'.format(output_dir), '{0}/sa_crop_z'.format(output_dir))
split_volume('{0}/seg_sa_crop.nii.gz'.format(output_dir), '{0}/seg_sa_crop_z'.format(output_dir))
# Label class in the segmentation
label = {'BG': 0, 'LV': 1, 'Myo': 2, 'RV': 3}
# Inter-frame motion estimation
nim = nib.load('{0}/sa_crop.nii.gz'.format(output_dir))
Z = nim.header['dim'][3]
T = nim.header['dim'][4]
dt = nim.header['pixdim'][4]
dice_lv_myo = []
for z in range(Z):
if not os.path.exists('{0}/myo_contour_ED_z{1:02d}.vtk'.format(output_dir, z)):
continue
# Split the cine sequence for this slice
split_sequence('{0}/sa_crop_z{1:02d}.nii.gz'.format(output_dir, z),
'{0}/sa_crop_z{1:02d}_fr'.format(output_dir, z))
# Forward image registration
for fr in range(1, T):
target_fr = fr - 1
source_fr = fr
target = '{0}/sa_crop_z{1:02d}_fr{2:02d}.nii.gz'.format(output_dir, z, target_fr)
source = '{0}/sa_crop_z{1:02d}_fr{2:02d}.nii.gz'.format(output_dir, z, source_fr)
par = '{0}/ffd_cine_2d_motion.cfg'.format(par_dir)
dof = '{0}/ffd_z{1:02d}_pair_{2:02d}_to_{3:02d}.dof.gz'.format(output_dir, z, target_fr, source_fr)
os.system('mirtk register {0} {1} -parin {2} -dofout {3}'.format(target, source, par, dof))
# Compose forward inter-frame transformation fields
os.system('cp {0}/ffd_z{1:02d}_pair_00_to_01.dof.gz '
'{0}/ffd_z{1:02d}_forward_00_to_01.dof.gz'.format(output_dir, z))
for fr in range(2, T):
dofs = ''
for k in range(1, fr + 1):
dof = '{0}/ffd_z{1:02d}_pair_{2:02d}_to_{3:02d}.dof.gz'.format(output_dir, z, k - 1, k)
dofs += dof + ' '
dof_out = '{0}/ffd_z{1:02d}_forward_00_to_{2:02d}.dof.gz'.format(output_dir, z, fr)
os.system('mirtk compose-dofs {0} {1} -approximate'.format(dofs, dof_out))
# Backward image registration
for fr in range(T - 1, 0, -1):
target_fr = (fr + 1) % T
source_fr = fr
target = '{0}/sa_crop_z{1:02d}_fr{2:02d}.nii.gz'.format(output_dir, z, target_fr)
source = '{0}/sa_crop_z{1:02d}_fr{2:02d}.nii.gz'.format(output_dir, z, source_fr)
par = '{0}/ffd_cine_2d_motion.cfg'.format(par_dir)
dof = '{0}/ffd_z{1:02d}_pair_{2:02d}_to_{3:02d}.dof.gz'.format(output_dir, z, target_fr, source_fr)
os.system('mirtk register {0} {1} -parin {2} -dofout {3}'.format(target, source, par, dof))
# Compose backward inter-frame transformation fields
os.system('cp {0}/ffd_z{1:02d}_pair_00_to_{2:02d}.dof.gz '
'{0}/ffd_z{1:02d}_backward_00_to_{2:02d}.dof.gz'.format(output_dir, z, T - 1))
for fr in range(T - 2, 0, -1):
dofs = ''
for k in range(T - 1, fr - 1, -1):
dof = '{0}/ffd_z{1:02d}_pair_{2:02d}_to_{3:02d}.dof.gz'.format(output_dir, z,
(k + 1) % T, k)
dofs += dof + ' '
dof_out = '{0}/ffd_z{1:02d}_backward_00_to_{2:02d}.dof.gz'.format(output_dir, z, fr)
os.system('mirtk compose-dofs {0} {1} -approximate'.format(dofs, dof_out))
# Average the forward and backward transformations
os.system('mirtk init-dof {0}/ffd_z{1:02d}_forward_00_to_00.dof.gz'.format(output_dir, z))
os.system('mirtk init-dof {0}/ffd_z{1:02d}_backward_00_to_00.dof.gz'.format(output_dir, z))
os.system('mirtk init-dof {0}/ffd_z{1:02d}_00_to_00.dof.gz'.format(output_dir, z))
for fr in range(1, T):
dof_forward = '{0}/ffd_z{1:02d}_forward_00_to_{2:02d}.dof.gz'.format(output_dir, z, fr)
weight_forward = float(T - fr) / T
dof_backward = '{0}/ffd_z{1:02d}_backward_00_to_{2:02d}.dof.gz'.format(output_dir, z, fr)
weight_backward = float(fr) / T
dof_combine = '{0}/ffd_z{1:02d}_00_to_{2:02d}.dof.gz'.format(output_dir, z, fr)
os.system('average_3d_ffd 2 {0} {1} {2} {3} {4}'.format(dof_forward, weight_forward,
dof_backward, weight_backward,
dof_combine))
# Transform the contours
for fr in range(0, T):
os.system('mirtk transform-points {0}/myo_contour_ED_z{1:02d}.vtk '
'{0}/myo_contour_z{1:02d}_fr{2:02d}.vtk '
'-dofin {0}/ffd_z{1:02d}_00_to_{2:02d}.dof.gz'.format(output_dir, z, fr))
# Transform the segmentation and evaluate the Dice metric
eval_dice = False
if eval_dice:
split_sequence('{0}/seg_sa_crop_z{1:02d}.nii.gz'.format(output_dir, z),
'{0}/seg_sa_crop_z{1:02d}_fr'.format(output_dir, z))
image_names = []
for fr in range(0, T):
os.system('mirtk transform-image {0}/seg_sa_crop_z{1:02d}_fr{2:02d}.nii.gz '
'{0}/seg_sa_crop_warp_ffd_z{1:02d}_fr{2:02d}.nii.gz '
'-dofin {0}/ffd_z{1:02d}_00_to_{2:02d}.dof.gz '
'-target {0}/seg_sa_crop_z{1:02d}_fr00.nii.gz'.format(output_dir, z, fr))
image_A = nib.load('{0}/seg_sa_crop_z{1:02d}_fr00.nii.gz'.format(output_dir, z)).get_data()
image_B = nib.load('{0}/seg_sa_crop_warp_ffd_z{1:02d}_fr{2:02d}.nii.gz'.format(output_dir, z, fr)).get_data()
dice_lv_myo += [[np_categorical_dice(image_A, image_B, 1),
np_categorical_dice(image_A, image_B, 2)]]
image_names += ['{0}/seg_sa_crop_warp_ffd_z{1:02d}_fr{2:02d}.nii.gz'.format(output_dir, z, fr)]
combine_name = '{0}/seg_sa_crop_warp_ffd_z{1:02d}.nii.gz'.format(output_dir, z)
make_sequence(image_names, dt, combine_name)
if eval_dice:
print(np.mean(dice_lv_myo, axis=0))
df_dice = pd.DataFrame(dice_lv_myo)
df_dice.to_csv('{0}/dice_cine_warp_ffd.csv'.format(output_dir), index=None, header=None)
# Merge the 2D tracked contours from all the slices
for fr in range(0, T):
append = vtk.vtkAppendPolyData()
reader = {}
for z in range(Z):
if not os.path.exists('{0}/myo_contour_z{1:02d}_fr{2:02d}.vtk'.format(output_dir, z, fr)):
continue
reader[z] = vtk.vtkPolyDataReader()
reader[z].SetFileName('{0}/myo_contour_z{1:02d}_fr{2:02d}.vtk'.format(output_dir, z, fr))
reader[z].Update()
append.AddInputData(reader[z].GetOutput())
append.Update()
writer = vtk.vtkPolyDataWriter()
writer.SetFileName('{0}/myo_contour_fr{1:02d}.vtk'.format(output_dir, fr))
writer.SetInputData(append.GetOutput())
writer.Write()
# Calculate the strain based on the line length
evaluate_strain_by_length('{0}/myo_contour_fr'.format(output_dir), T, dt, output_name_stem)
def remove_mitral_valve_points(endo_contour, epi_contour, mitral_plane):
""" Remove the mitral valve points from the contours and
start the contours from the point next to the mitral valve plane.
So connecting the lines will be easier in the next step.
"""
N = endo_contour.shape[0]
start_i = 0
for i in range(N):
y, x = endo_contour[i]
prev_y, prev_x = endo_contour[(i - 1) % N]
if not mitral_plane[x, y] and mitral_plane[prev_x, prev_y]:
start_i = i
break
endo_contour = np.concatenate((endo_contour[start_i:], endo_contour[:start_i]))
N = endo_contour.shape[0]
end_i = N
for i in range(N):
y, x = endo_contour[i]
if mitral_plane[x, y]:
end_i = i
break
endo_contour = endo_contour[:end_i]
N = epi_contour.shape[0]
start_i = 0
for i in range(N):
y, x = epi_contour[i]
y2, x2 = epi_contour[(i - 1) % N]
if not mitral_plane[x, y] and mitral_plane[x2, y2]:
start_i = i
break
epi_contour = np.concatenate((epi_contour[start_i:], epi_contour[:start_i]))
N = epi_contour.shape[0]
end_i = N
for i in range(N):
y, x = epi_contour[i]
if mitral_plane[x, y]:
end_i = i
break
epi_contour = epi_contour[:end_i]
return endo_contour, epi_contour
def determine_la_aha_part(seg_la, affine_la, affine_sa):
""" Extract the mid-line of the left ventricle, record its index
along the long-axis and determine the part for each index.
"""
# Label class in the segmentation
label = {'BG': 0, 'LV': 1, 'Myo': 2, 'RV': 3, 'LA': 4, 'RA': 5}
# Sort the left ventricle and myocardium points according to their long-axis locations
lv_myo_points = []
X, Y = seg_la.shape[:2]
z = 0
for y in range(Y):
for x in range(X):
if seg_la[x, y] == label['LV'] or seg_la[x, y] == label['Myo']:
z_sa = np.dot(np.linalg.inv(affine_sa), np.dot(affine_la, np.array([x, y, z, 1])))[2]
la_idx = int(round(z_sa * 2))
lv_myo_points += [[x, y, la_idx]]
lv_myo_points = np.array(lv_myo_points)
lv_myo_idx_min = np.min(lv_myo_points[:, 2])
lv_myo_idx_max = np.max(lv_myo_points[:, 2])
# Determine the AHA part according to the slice location along the long-axis
if affine_sa[2, 2] > 0:
la_idx = np.arange(lv_myo_idx_max, lv_myo_idx_min, -1)
else:
la_idx = np.arange(lv_myo_idx_min, lv_myo_idx_max + 1, 1)
n_la_idx = len(la_idx)
i1 = int(math.ceil(n_la_idx / 3.0))
i2 = int(math.ceil(2 * n_la_idx / 3.0))
i3 = n_la_idx
part_z = {}
for i in range(0, i1):
part_z[la_idx[i]] = 'basal'
for i in range(i1, i2):
part_z[la_idx[i]] = 'mid'
for i in range(i2, i3):
part_z[la_idx[i]] = 'apical'
# Extract the mid-line of left ventricle endocardium.
# Only use the endocardium points so that it would not be affected by
# the myocardium points at the most basal slices.
lv_points = []
X, Y = seg_la.shape[:2]
z = 0
for y in range(Y):
for x in range(X):
if seg_la[x, y] == label['LV']:
z_sa = np.dot(np.linalg.inv(affine_sa), np.dot(affine_la, np.array([x, y, z, 1])))[2]
la_idx = int(round(z_sa * 2))
lv_points += [[x, y, la_idx]]
lv_points = np.array(lv_points)
lv_idx_min = np.min(lv_points[:, 2])
lv_idx_max = np.max(lv_points[:, 2])
mid_line = {}
for la_idx in range(lv_idx_min, lv_idx_max + 1):
mx, my = np.mean(lv_points[lv_points[:, 2] == la_idx, :2], axis=0)
mid_line[la_idx] = np.dot(affine_la, np.array([mx, my, z, 1]))[:3]
for la_idx in range(lv_myo_idx_min, lv_idx_min):
mid_line[la_idx] = mid_line[lv_idx_min]
for la_idx in range(lv_idx_max, lv_myo_idx_max + 1):
mid_line[la_idx] = mid_line[lv_idx_max]
return part_z, mid_line
def determine_la_aha_segment_id(point, la_idx, axis, mid_line, part_z):
""" Determine the AHA segment ID given a point on long-axis images.
"""
# The mid-point at this position
mid_point = mid_line[la_idx]
# The line from the mid-point to the contour point
vec = point - mid_point
if np.dot(vec, axis['lv_to_sep']) > 0:
# This is septum
if part_z[la_idx] == 'basal':
# basal septal
seg_id = 1
elif part_z[la_idx] == 'mid':
# mid septal
seg_id = 3
elif part_z[la_idx] == 'apical':
# apical septal
seg_id = 5
else:
# This is lateral
if part_z[la_idx] == 'basal':
# basal lateral
seg_id = 2
elif part_z[la_idx] == 'mid':
# mid lateral
seg_id = 4
elif part_z[la_idx] == 'apical':
# apical lateral
seg_id = 6
return seg_id
def extract_la_myocardial_contour(seg_la_name, seg_sa_name, contour_name):
""" Extract the myocardial contours on long-axis images.
Also, determine the AHA segment ID for all the contour points.
"""
# Read the segmentation image
nim = nib.load(seg_la_name)
X, Y, Z = nim.header['dim'][1:4]
affine = nim.affine
seg = nim.get_data()
# Label class in the segmentation
label = {'BG': 0, 'LV': 1, 'Myo': 2, 'RV': 3, 'LA': 4, 'RA': 5}
# Determine the AHA coordinate system using the mid-cavity slice of short-axis images
nim_sa = nib.load(seg_sa_name)
affine_sa = nim_sa.affine
seg_sa = nim_sa.get_data()
aha_axis = determine_aha_coordinate_system(seg_sa, affine_sa)
# Construct the points set and data arrays to represent both endo and epicardial contours
points = vtk.vtkPoints()
points_radial = vtk.vtkFloatArray()
points_radial.SetName('Direction_Radial')
points_radial.SetNumberOfComponents(3)
points_label = vtk.vtkIntArray()
points_label.SetName('Label')
points_aha = vtk.vtkIntArray()
points_aha.SetName('Segment ID')
point_id = 0
lines = vtk.vtkCellArray()
lines_aha = vtk.vtkIntArray()
lines_aha.SetName('Segment ID')
lines_dir = vtk.vtkIntArray()
lines_dir.SetName('Direction ID')
# Check whether there is the endocardial segmentation
# Only keep the largest connected component
z = 0
seg_z = seg[:, :, z]
endo = (seg_z == label['LV']).astype(np.uint8)
endo = get_largest_cc(endo).astype(np.uint8)
# The myocardium may be split into two parts due to the very thin apex.
# So we do not apply get_largest_cc() to it. However, we remove small pieces, which
# may cause problems in determining the contours.
myo = (seg_z == label['Myo']).astype(np.uint8)
myo = remove_small_cc(myo).astype(np.uint8)
epi = (endo | myo).astype(np.uint8)
epi = get_largest_cc(epi).astype(np.uint8)
# Extract endocardial contour
# Note: cv2 considers an input image as a Y x X array, which is different
# from nibabel, which assumes an X x Y array.
_, contours, _ = cv2.findContours(cv2.inRange(endo, 1, 1), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
endo_contour = contours[0][:, 0, :]
# Extract epicardial contour
_, contours, _ = cv2.findContours(cv2.inRange(epi, 1, 1), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
epi_contour = contours[0][:, 0, :]
# Record the points located on the mitral valve plane.
mitral_plane = np.zeros(seg_z.shape)
N = epi_contour.shape[0]
for i in range(N):
y, x = epi_contour[i]
if endo[x, y]:
mitral_plane[x, y] = 1
# Remove the mitral valve points from the contours and
# start the contours from the point next to the mitral valve plane.
# So connecting the lines will be easier in the next step.
if np.sum(mitral_plane) >= 1:
endo_contour, epi_contour = remove_mitral_valve_points(endo_contour, epi_contour, mitral_plane)
# Note that remove_mitral_valve_points may fail if the endo or epi has more
# than one connected component. As a result, the endo_contour or epi_contour
# may have only zero or one points left, which causes problems for approximate_contour.
# Smooth the contours
if len(endo_contour) >= 2:
endo_contour = approximate_contour(endo_contour)
if len(epi_contour) >= 2:
epi_contour = approximate_contour(epi_contour)
# Determine the aha part and extract the mid-line of the left ventricle
part_z, mid_line = determine_la_aha_part(seg_z, affine, affine_sa)
la_idx_min = np.array([x for x in part_z.keys()]).min()
la_idx_max = np.array([x for x in part_z.keys()]).max()
# Go through the endo contour points
N = endo_contour.shape[0]
for i in range(N):
y, x = endo_contour[i]
# The world coordinate of this point
p = np.dot(affine, np.array([x, y, z, 1]))[:3]
points.InsertNextPoint(p[0], p[1], p[2])
# The index along the long axis
z_sa = np.dot(np.linalg.inv(affine_sa), np.hstack([p, 1]))[2]
la_idx = int(round(z_sa * 2))
la_idx = max(la_idx, la_idx_min)
la_idx = min(la_idx, la_idx_max)
# The radial direction
mid_point = mid_line[la_idx]
d = p - mid_point
d = d / np.linalg.norm(d)
points_radial.InsertNextTuple3(d[0], d[1], d[2])
# Record the type of the point (1 = endo, 2 = epi)
points_label.InsertNextTuple1(1)
# Record the segment ID
seg_id = determine_la_aha_segment_id(p, la_idx, aha_axis, mid_line, part_z)
points_aha.InsertNextTuple1(seg_id)
# Add the line
if i < (N - 1):
lines.InsertNextCell(2, [point_id, point_id + 1])
lines_aha.InsertNextTuple1(seg_id)
# Line direction (1 = radial, 2 = circumferential, 3 = longitudinal)
lines_dir.InsertNextTuple1(3)
# Increment the point index
point_id += 1
# Go through the epi contour points
N = epi_contour.shape[0]
for i in range(N):
y, x = epi_contour[i]
# The world coordinate of this point
p = np.dot(affine, np.array([x, y, z, 1]))[:3]
points.InsertNextPoint(p[0], p[1], p[2])
# The index along the long axis
z_sa = np.dot(np.linalg.inv(affine_sa), np.hstack([p, 1]))[2]
la_idx = int(round(z_sa * 2))
la_idx = max(la_idx, la_idx_min)
la_idx = min(la_idx, la_idx_max)
# The radial direction
mid_point = mid_line[la_idx]
d = p - mid_point
d = d / np.linalg.norm(d)
points_radial.InsertNextTuple3(d[0], d[1], d[2])
# Record the type of the point (1 = endo, 2 = epi)
points_label.InsertNextTuple1(2)
# Record the segment ID
seg_id = determine_la_aha_segment_id(p, la_idx, aha_axis, mid_line, part_z)
points_aha.InsertNextTuple1(seg_id)
# Add the line
if i < (N - 1):
lines.InsertNextCell(2, [point_id, point_id + 1])
lines_aha.InsertNextTuple1(seg_id)
# Line direction (1 = radial, 2 = circumferential, 3 = longitudinal)
lines_dir.InsertNextTuple1(3)
# Increment the point index
point_id += 1
# Save to a vtk file
poly = vtk.vtkPolyData()
poly.SetPoints(points)
poly.GetPointData().AddArray(points_label)
poly.GetPointData().AddArray(points_aha)
poly.GetPointData().AddArray(points_radial)
poly.SetLines(lines)
poly.GetCellData().AddArray(lines_aha)
poly.GetCellData().AddArray(lines_dir)
writer = vtk.vtkPolyDataWriter()
writer.SetFileName(contour_name)
writer.SetInputData(poly)
writer.Write()
# Change vtk file version to 4.0 to avoid the warning by MIRTK, which is
# developed using VTK 6.3, which does not know file version 4.1.
os.system('sed -i "1s/4.1/4.0/" {0}'.format(contour_name))
def evaluate_la_strain_by_length(contour_name_stem, T, dt, output_name_stem):
""" Calculate the strain based on the line length """
# Read the polydata at the first time frame (ED frame)
fr = 0
reader = vtk.vtkPolyDataReader()
reader.SetFileName('{0}{1:02d}.vtk'.format(contour_name_stem, fr))
reader.Update()
poly = reader.GetOutput()
points = poly.GetPoints()
# Calculate the length of each line
lines = poly.GetLines()
lines_aha = poly.GetCellData().GetArray('Segment ID')
lines_dir = poly.GetCellData().GetArray('Direction ID')
n_lines = lines.GetNumberOfCells()
length_ED = np.zeros(n_lines)
seg_id = np.zeros(n_lines)
dir_id = np.zeros(n_lines)
lines.InitTraversal()
for i in range(n_lines):
ids = vtk.vtkIdList()
lines.GetNextCell(ids)
p1 = np.array(points.GetPoint(ids.GetId(0)))
p2 = np.array(points.GetPoint(ids.GetId(1)))
d = np.linalg.norm(p1 - p2)
seg_id[i] = lines_aha.GetValue(i)
dir_id[i] = lines_dir.GetValue(i)
length_ED[i] = d
# For each time frame, calculate the strain, i.e. change of length
table_strain = {}
table_strain['longit'] = np.zeros((7, T))
for fr in range(0, T):
# Read the polydata
reader = vtk.vtkPolyDataReader()
filename = '{0}{1:02d}.vtk'.format(contour_name_stem, fr)
reader.SetFileName(filename)
reader.Update()
poly = reader.GetOutput()
points = poly.GetPoints()
# Calculate the strain for each line
lines = poly.GetLines()
n_lines = lines.GetNumberOfCells()
strain = np.zeros(n_lines)
vtk_strain = vtk.vtkFloatArray()
vtk_strain.SetName('Strain')
lines.InitTraversal()
for i in range(n_lines):
ids = vtk.vtkIdList()
lines.GetNextCell(ids)
p1 = np.array(points.GetPoint(ids.GetId(0)))
p2 = np.array(points.GetPoint(ids.GetId(1)))
d = np.linalg.norm(p1 - p2)
# Strain of this line (unit: %)
strain[i] = (d - length_ED[i]) / length_ED[i] * 100
vtk_strain.InsertNextTuple1(strain[i])
# Save the strain array to the vtk file
poly.GetCellData().AddArray(vtk_strain)
writer = vtk.vtkPolyDataWriter()
writer.SetInputData(poly)
writer.SetFileName(filename)
writer.Write()
os.system('sed -i "1s/4.1/4.0/" {0}'.format(filename))
# Calculate the segmental and global strains
for i in range(6):
table_strain['longit'][i, fr] = np.mean(strain[(seg_id == (i + 1)) & (dir_id == 3)])
table_strain['longit'][-1, fr] = np.mean(strain[dir_id == 3])
for c in ['longit']:
# Save into csv files
index = [str(x) for x in np.arange(1, 7)] + ['Global']
column = np.arange(0, T) * dt * 1e3
df = pd.DataFrame(table_strain[c], index=index, columns=column)
df.to_csv('{0}_{1}.csv'.format(output_name_stem, c))
def cine_2d_la_motion_and_strain_analysis(data_dir, par_dir, output_dir, output_name_stem):
""" Perform motion tracking and strain analysis for cine MR images. """
# Crop the image to save computation for image registration
# Focus on the left ventricle so that motion tracking is less affected by
# the movement of RV and LV outflow tract
padding('{0}/seg4_la_4ch_ED.nii.gz'.format(data_dir),
'{0}/seg4_la_4ch_ED.nii.gz'.format(data_dir),
'{0}/seg4_la_4ch_lv_ED.nii.gz'.format(output_dir), 2, 1)
padding('{0}/seg4_la_4ch_lv_ED.nii.gz'.format(output_dir),
'{0}/seg4_la_4ch_lv_ED.nii.gz'.format(output_dir),
'{0}/seg4_la_4ch_lv_ED.nii.gz'.format(output_dir), 3, 0)
padding('{0}/seg4_la_4ch_lv_ED.nii.gz'.format(output_dir),
'{0}/seg4_la_4ch_lv_ED.nii.gz'.format(output_dir),
'{0}/seg4_la_4ch_lv_ED.nii.gz'.format(output_dir), 4, 0)
padding('{0}/seg4_la_4ch_lv_ED.nii.gz'.format(output_dir),
'{0}/seg4_la_4ch_lv_ED.nii.gz'.format(output_dir),
'{0}/seg4_la_4ch_lv_ED.nii.gz'.format(output_dir), 5, 0)
auto_crop_image('{0}/seg4_la_4ch_lv_ED.nii.gz'.format(output_dir),
'{0}/seg4_la_4ch_lv_crop_ED.nii.gz'.format(output_dir), 20)
os.system('mirtk transform-image {0}/la_4ch.nii.gz {1}/la_4ch_crop.nii.gz '
'-target {1}/seg4_la_4ch_lv_crop_ED.nii.gz'.format(data_dir, output_dir))
os.system('mirtk transform-image {0}/seg4_la_4ch.nii.gz {1}/seg4_la_4ch_crop.nii.gz '
'-target {1}/seg4_la_4ch_lv_crop_ED.nii.gz'.format(data_dir, output_dir))
# Extract the myocardial contour
extract_la_myocardial_contour('{0}/seg4_la_4ch_ED.nii.gz'.format(data_dir),
'{0}/seg_sa_ED.nii.gz'.format(data_dir),
'{0}/la_4ch_myo_contour_ED.vtk'.format(output_dir))
# Inter-frame motion estimation
nim = nib.load('{0}/la_4ch_crop.nii.gz'.format(output_dir))
T = nim.header['dim'][4]
dt = nim.header['pixdim'][4]
# Label class in the segmentation
label = {'BG': 0, 'LV': 1, 'Myo': 2, 'RV': 3, 'LA': 4, 'RA': 5}
# Split the cine sequence
split_sequence('{0}/la_4ch_crop.nii.gz'.format(output_dir),
'{0}/la_4ch_crop_fr'.format(output_dir))
# Forward image registration
for fr in range(1, T):
target_fr = fr - 1
source_fr = fr
target = '{0}/la_4ch_crop_fr{1:02d}.nii.gz'.format(output_dir, target_fr)
source = '{0}/la_4ch_crop_fr{1:02d}.nii.gz'.format(output_dir, source_fr)
par = '{0}/ffd_cine_la_2d_motion.cfg'.format(par_dir)
dof = '{0}/ffd_la_4ch_pair_{1:02d}_to_{2:02d}.dof.gz'.format(output_dir, target_fr, source_fr)
os.system('mirtk register {0} {1} -parin {2} -dofout {3}'.format(target, source, par, dof))
# Compose forward inter-frame transformation fields
os.system('cp {0}/ffd_la_4ch_pair_00_to_01.dof.gz '
'{0}/ffd_la_4ch_forward_00_to_01.dof.gz'.format(output_dir))
for fr in range(2, T):
dofs = ''
for k in range(1, fr + 1):
dof = '{0}/ffd_la_4ch_pair_{1:02d}_to_{2:02d}.dof.gz'.format(output_dir, k - 1, k)
dofs += dof + ' '
dof_out = '{0}/ffd_la_4ch_forward_00_to_{1:02d}.dof.gz'.format(output_dir, fr)
os.system('mirtk compose-dofs {0} {1} -approximate'.format(dofs, dof_out))
# Backward image registration
for fr in range(T - 1, 0, -1):
target_fr = (fr + 1) % T
source_fr = fr
target = '{0}/la_4ch_crop_fr{1:02d}.nii.gz'.format(output_dir, target_fr)
source = '{0}/la_4ch_crop_fr{1:02d}.nii.gz'.format(output_dir, source_fr)
par = '{0}/ffd_cine_la_2d_motion.cfg'.format(par_dir)
dof = '{0}/ffd_la_4ch_pair_{1:02d}_to_{2:02d}.dof.gz'.format(output_dir, target_fr, source_fr)
os.system('mirtk register {0} {1} -parin {2} -dofout {3}'.format(target, source, par, dof))
# Compose backward inter-frame transformation fields
os.system('cp {0}/ffd_la_4ch_pair_00_to_{1:02d}.dof.gz '
'{0}/ffd_la_4ch_backward_00_to_{1:02d}.dof.gz'.format(output_dir, T - 1))
for fr in range(T - 2, 0, -1):
dofs = ''
for k in range(T - 1, fr - 1, -1):
dof = '{0}/ffd_la_4ch_pair_{1:02d}_to_{2:02d}.dof.gz'.format(output_dir, (k + 1) % T, k)
dofs += dof + ' '
dof_out = '{0}/ffd_la_4ch_backward_00_to_{1:02d}.dof.gz'.format(output_dir, fr)
os.system('mirtk compose-dofs {0} {1} -approximate'.format(dofs, dof_out))
# Average the forward and backward transformations
os.system('mirtk init-dof {0}/ffd_la_4ch_forward_00_to_00.dof.gz'.format(output_dir))
os.system('mirtk init-dof {0}/ffd_la_4ch_backward_00_to_00.dof.gz'.format(output_dir))
os.system('mirtk init-dof {0}/ffd_la_4ch_00_to_00.dof.gz'.format(output_dir))
for fr in range(1, T):
dof_forward = '{0}/ffd_la_4ch_forward_00_to_{1:02d}.dof.gz'.format(output_dir, fr)
weight_forward = float(T - fr) / T
dof_backward = '{0}/ffd_la_4ch_backward_00_to_{1:02d}.dof.gz'.format(output_dir, fr)
weight_backward = float(fr) / T
dof_combine = '{0}/ffd_la_4ch_00_to_{1:02d}.dof.gz'.format(output_dir, fr)
os.system('average_3d_ffd 2 {0} {1} {2} {3} {4}'.format(dof_forward, weight_forward,
dof_backward, weight_backward,
dof_combine))
# Transform the contours and calculate the strain
for fr in range(0, T):
os.system('mirtk transform-points {0}/la_4ch_myo_contour_ED.vtk '
'{0}/la_4ch_myo_contour_fr{1:02d}.vtk '
'-dofin {0}/ffd_la_4ch_00_to_{1:02d}.dof.gz'.format(output_dir, fr))
# Calculate the strain based on the line length
evaluate_la_strain_by_length('{0}/la_4ch_myo_contour_fr'.format(output_dir),
T, dt, output_name_stem)
# Transform the segmentation and evaluate the Dice metric
eval_dice = False
if eval_dice:
split_sequence('{0}/seg4_la_4ch_crop.nii.gz'.format(output_dir),
'{0}/seg4_la_4ch_crop_fr'.format(output_dir))
dice_lv_myo = []
image_names = []
for fr in range(0, T):
os.system('mirtk transform-image {0}/seg4_la_4ch_crop_fr{1:02d}.nii.gz '
'{0}/seg4_la_4ch_crop_warp_ffd_fr{1:02d}.nii.gz '
'-dofin {0}/ffd_la_4ch_00_to_{1:02d}.dof.gz '
'-target {0}/seg4_la_4ch_crop_fr00.nii.gz'.format(output_dir, fr))
image_A = nib.load('{0}/seg4_la_4ch_crop_fr00.nii.gz'.format(output_dir)).get_data()
image_B = nib.load('{0}/seg4_la_4ch_crop_warp_ffd_fr{1:02d}.nii.gz'.format(output_dir, fr)).get_data()
dice_lv_myo += [[np_categorical_dice(image_A, image_B, 1),
np_categorical_dice(image_A, image_B, 2)]]
image_names += ['{0}/seg4_la_4ch_crop_warp_ffd_fr{1:02d}.nii.gz'.format(output_dir, fr)]
combine_name = '{0}/seg4_la_4ch_crop_warp_ffd.nii.gz'.format(output_dir)
make_sequence(image_names, dt, combine_name)
print(np.mean(dice_lv_myo, axis=0))
df_dice = pd.DataFrame(dice_lv_myo)
df_dice.to_csv('{0}/dice_cine_la_4ch_warp_ffd.csv'.format(output_dir), index=None, header=None)
def plot_bulls_eye(data, vmin, vmax, cmap='Reds', color_line='black'):
""" Plot the bull's eye plot.
data: values for 16 segments
"""
if len(data) != 16:
print('Error: len(data) != 16!')
exit(0)
# The cartesian coordinate and the polar coordinate
x = np.linspace(-1, 1, 201)
y = np.linspace(-1, 1, 201)
xx, yy = np.meshgrid(x, y)
r = np.sqrt(xx * xx + yy * yy)
theta = np.degrees(np.arctan2(yy, xx))
# The radius and degree for each segment
R1, R2, R3, R4 = 1, 0.65, 0.3, 0.0
rad_deg = {
1: (R1, R2, 60, 120),
2: (R1, R2, 120, 180),
3: (R1, R2, -180, -120),
4: (R1, R2, -120, -60),
5: (R1, R2, -60, 0),
6: (R1, R2, 0, 60),
7: (R2, R3, 60, 120),
8: (R2, R3, 120, 180),
9: (R2, R3, -180, -120),
10: (R2, R3, -120, -60),
11: (R2, R3, -60, 0),
12: (R2, R3, 0, 60),
13: (R3, R4, 45, 135),
14: (R3, R4, 135, -135),
15: (R3, R4, -135, -45),
16: (R3, R4, -45, 45)
}
# Plot the segments
canvas = np.zeros(xx.shape)
cx, cy = (np.array(xx.shape) - 1) / 2
sz = cx
for i in range(1, 17):
val = data[i - 1]
r1, r2, theta1, theta2 = rad_deg[i]
if theta2 > theta1:
mask = ((r < r1) & (r >= r2)) & ((theta >= theta1) & (theta < theta2))
else:
mask = ((r < r1) & (r >= r2)) & ((theta >= theta1) | (theta < theta2))
canvas[mask] = val
plt.imshow(canvas, cmap=cmap, vmin=vmin, vmax=vmax)
plt.colorbar()
plt.axis('off')
plt.gca().invert_yaxis()
# Plot the circles
for r in [R1, R2, R3]:
deg = np.linspace(0, 2 * np.pi, 201)
circle_x = cx + sz * r * np.cos(deg)
circle_y = cy + sz * r * np.sin(deg)
plt.plot(circle_x, circle_y, color=color_line)
# Plot the lines between segments
for i in range(1, 17):
r1, r2, theta1, theta2 = rad_deg[i]
line_x = cx + sz * np.array([r1, r2]) * np.cos(np.radians(theta1))
line_y = cy + sz * np.array([r1, r2]) * np.sin(np.radians(theta1))
plt.plot(line_x, line_y, color=color_line)
# Plot the indicator for RV insertion points
for i in [2, 4]:
r1, r2, theta1, theta2 = rad_deg[i]
x = cx + sz * r1 * np.cos(np.radians(theta1))
y = cy + sz * r1 * np.sin(np.radians(theta1))
plt.plot([x, x - sz * 0.2], [y, y], color=color_line)
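# Illustrative usage (added; not part of the original script). A minimal sketch,
# assuming 16 made-up segmental values, e.g. peak circumferential strain in %:
#
#   example_values = np.linspace(-20, -10, 16)
#   plt.figure(figsize=(6, 6))
#   plot_bulls_eye(example_values, vmin=-25, vmax=-5, cmap='RdBu_r')
#   plt.title("Bull's eye plot (synthetic data)")
#   plt.show()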
def atrium_pass_quality_control(label, label_dict):
""" Quality control for atrial volume estimation """
for l_name, l in label_dict.items():
# Criterion: the atrium does not disappear at any time point so that we can
# measure the area and length.
T = label.shape[3]
for t in range(T):
label_t = label[:, :, 0, t]
area = np.sum(label_t == l)
if area == 0:
print('The area of {0} is 0 at time frame {1}.'.format(l_name, t))
return False
return True
def evaluate_atrial_area_length(label, nim, long_axis):
""" Evaluate the atrial area and length from 2 chamber or 4 chamber view images. """
# Area per pixel
pixdim = nim.header['pixdim'][1:4]
area_per_pix = pixdim[0] * pixdim[1] * 1e-2 # Unit: cm^2
# Go through the label class
L = []
A = []
landmarks = []
labs = np.sort(list(set(np.unique(label)) - set([0])))
for i in labs:
# The binary label map
label_i = (label == i)
# Get the largest component in case we have a bad segmentation
label_i = get_largest_cc(label_i)
# Go through all the points in the atrium, sort them by the distance along the long-axis.
points_label = np.nonzero(label_i)
points = []
for j in range(len(points_label[0])):
x = points_label[0][j]
y = points_label[1][j]
points += [[x, y,
np.dot(np.dot(nim.affine, np.array([x, y, 0, 1]))[:3], long_axis)]]
points = np.array(points)
points = points[points[:, 2].argsort()]
# The centre at the top part of the atrium (top third)
n_points = len(points)
top_points = points[int(2 * n_points / 3):]
cx, cy, _ = np.mean(top_points, axis=0)
# The centre at the bottom part of the atrium (bottom third)
bottom_points = points[:int(n_points / 3)]
bx, by, _ = np.mean(bottom_points, axis=0)
# Determine the major axis by connecting the top centre and the bottom centre
major_axis = np.array([cx - bx, cy - by])
major_axis = major_axis / np.linalg.norm(major_axis)
# Get the intersection between the major axis and the atrium contour
px = cx + major_axis[0] * 100
py = cy + major_axis[1] * 100
qx = cx - major_axis[0] * 100
qy = cy - major_axis[1] * 100
if np.isnan(px) or np.isnan(py) or np.isnan(qx) or np.isnan(qy):
return -1, -1, -1
# Note the difference between nifti image index and cv2 image index
# nifti image index: XY
# cv2 image index: YX (height, width)
image_line = np.zeros(label_i.shape)
cv2.line(image_line, (int(qy), int(qx)), (int(py), int(px)), (1, 0, 0))
image_line = label_i & (image_line > 0)
# Sort the intersection points by the distance along long-axis
# and calculate the length of the intersection
points_line = np.nonzero(image_line)
points = []
for j in range(len(points_line[0])):
x = points_line[0][j]
y = points_line[1][j]
# World coordinate
point = np.dot(nim.affine, np.array([x, y, 0, 1]))[:3]
# Distance along the long-axis
points += [np.append(point, np.dot(point, long_axis))]
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
class ScatterTest(tf.test.TestCase):
def _VariableRankTest(self, np_scatter, tf_scatter):
np.random.seed(8)
with self.test_session():
for indices_shape in (), (2,), (2, 3), (2, 3, 4):
for extra_shape in (), (5,), (5, 6):
# Generate random indices with no duplicates for easy numpy comparison
size = np.prod(indices_shape, dtype=np.int32)
import xml.etree.ElementTree as xmlet
from pathlib import Path
import collections
import numpy as np
from craamvert.utils import julday, TRK_TYPE, INVALID_XML_FILE
from craamvert.instruments import XML_TYPE_TO_NUMPY_TYPE, CASLEO, GMT_NEGATIVE_3
from instruments.utils.hdu_handlers import create_primary_hdu
from craamvert.instruments.poemas import POEMASDataType, POEMAS_TRK, POEMAS_FULL_NAME, POEMAS_LATITUDE_LONGITUDE_HEIGHT, \
POEMAS_FREQUENCY
PATH_TO_XML_TRK_COLUMN_NAME = {
POEMASDataType.HEADER: "POEMASDataFormatHead.xml",
POEMASDataType.BODY: "POEMASDataFormat.xml",
POEMASDataType.FULL_BODY: "POEMASFullDataFormat.xml",
}
class TRK:
def __init__(self):
# All attributes must be declared on __init__
# TRK information
self.date = None
self.time = None
self.start_time = None
self.end_time = None
self.records = None
# TRK Header data is equivalent to:
# Code, NRS, FreqNo, Freq1, Freq2, BRTMin, BRTMax
self.header_column_names = None
self.header_data = None
# TRK Body data is equivalent to:
# sec, ele_ang, azi_ang, TBL_45, TBR_45, TBL_90, TBR_90
self.body_column_names = None
self.body_data = None
self.__treated_body_column_names = None
self.__treated_body_data = None
# Fits information
self.primary_hdu = None
def convert_from_file(self, path, file_name, path_to_xml):
"""Loads data from a file and returns an `TRK` object.
Parameters:
path : pathlib.Path - Location of the TRK file in the file system.
file_name : str - Name of the TRK file.
path_to_xml : Path, optional - Location of the TRK xml description files in the file system.
Raises:
ValueError: If the filename is invalid.
"""
# Extract values equivalent to TRK header
# Code, NRS, FreqNo, Freq1, Freq2, BRTMin, BRTMax
self.header_column_names = self.__get_column_names(path_to_xml, POEMASDataType.HEADER)
trk_header_column_names_list = list()
for key, value in self.header_column_names.items():
trk_header_column_names_list.append((key, value[1], value[0]))
# Extract values equivalent to TRK body
# sec, ele_ang, azi_ang, TB
self.body_column_names = self.__get_column_names(path_to_xml, POEMASDataType.BODY)
trk_data_column_names_list = list()
for key, value in self.body_column_names.items():
trk_data_column_names_list.append((key, value[1], value[0]))
# Extract values from file that is going to be converted
# Values will match values from respective lists
# First, the header is extracted
# Code, NRS, FreqNo, Freq1, Freq2, BRTMin, BRTMax
# That's why we set count=1, otherwise all data from the file would be classified as header
# Then, data is extracted
# sec, ele_ang, azi_ang, TB
# This data is interspersed; since we want to keep reading it, we don't set count
# We set count=1 at first to read only the header, then we set offset=28 because we already read the header
if isinstance(path, bytes):
self.header_data = np.frombuffer(path, trk_header_column_names_list, count=1)
self.body_data = np.frombuffer(path, trk_data_column_names_list, offset=28)
"""
Classes for implementing prior probabilities in the context of Bayesian
modelling and inference.
"""
# std libs
import numbers
import functools as ftl
# third-party libs
import numpy as np
from scipy import stats
from scipy.stats._distn_infrastructure import rv_frozen
# local libs
from obstools.modelling.utils import prod
from recipes import pprint
from recipes.dicts import AttrReadItem, pformat as pformat_dict
#
def echo(*_):
return _
def get_shape(data):
if isinstance(data, Parameters):
return data.npar
else:
return np.shape(data)
def _walk_dtype_size(obj):
if not isinstance(obj, np.dtype):
raise ValueError('dtype please')
if obj.fields:
for k, v in obj.fields.items():
dtype, offset, *title = v
yield from _walk_dtype_size(dtype)
else:
yield prod(obj.shape)
def _walk_dtype_adapt(obj, new_base):
if not isinstance(obj, np.dtype):
raise ValueError('dtype please')
if obj.fields:
for k, v in obj.fields.items():
dtype, offset, *title = v
yield k, list(_walk_dtype_adapt(dtype, new_base))
else:
yield new_base
def format_params(names, params, uncert=None, precision=2, switch=3, sign=' ',
times='x', compact=True, unicode=True, latex=False,
engineering=False):
assert len(params) == len(names)
# FIXME: use recipes.pprint.numeric_array
s = np.vectorize(pprint.numeric, ['U10'])(params, precision, switch, sign,
times, compact, unicode, latex,
engineering)
if uncert is not None:
raise NotImplementedError # TODO
return list(map('%s = %s'.__mod__, zip(names, s)))
class _RecurseHelper(object):
"""
Helper class for initializing array subclasses by walking arbitrarily nested
object definitions.
"""
def __init__(self, allow_types=any):
self.obj_count = 0
self.allow_types = allow_types
def make_dtype(self, kws, base_dtype):
call = ftl.partial(self._make_dtype, base_dtype=base_dtype)
dtype = list(self.walk(kws, call))
# reset object count so we can reuse this method
size = self.obj_count
self.obj_count = 0
return dtype, size
def _make_dtype(self, name, data, base_dtype):
shape = get_shape(data)
self.obj_count += prod(shape)
return name, base_dtype, shape
def get_data(self, obj, flat=False, container=tuple, allow_types=any): #
# get data as nested `container` type
call = ftl.partial(self._get_data, allow_types=allow_types)
return container(self.walk(obj, call, flat, False, container))
def _get_data(self, data, allow_types=any):
self.type_assertion(data, allow_types)
return data
# return self.asscalar(None, data)[1]
@staticmethod
def get_npar(dtype):
return sum(_walk_dtype_size(dtype))
@staticmethod
def get_size(data):
return data.size
@staticmethod
def type_assertion(obj, allow_types=any):
if allow_types is any:
return obj
# print('allow types', allow_types)
if not isinstance(obj, allow_types):
raise TypeError('%s type objects are not supported' % type(obj))
@staticmethod
def asscalar(key, val):
if isinstance(val, numbers.Real):
return key, val
if np.size(val) == 1:
return key, val.item()
return key, val
# def upper_walk(self, obj, container_out=list):
#
# if
#
#
# container_out(
# self.walk(call, True, False)
# )
@staticmethod
def walk(obj, call=echo, flat=False, with_keys=True,
container_out=list, recurse_types=None):
"""
recursive walker for dict-like objects. no safeguards, don't blow
anything up!
"""
if recurse_types is None:
recurse_types = RECURSE_TYPES
# multiple dispatch item_getter for flexible obj construction
for key, item in item_getter(obj):
# make sure we have valid field names (keys)
if not isinstance(key, str):
raise ValueError('Not a valid name: %r' % key)
if isinstance(item, recurse_types):
# recurse
gen = _RecurseHelper.walk(item, call, flat, with_keys,
container_out)
if flat:
yield from gen
else:
if with_keys:
yield key, container_out(gen) # map(
else:
yield container_out(gen)
else:
# switch caller here to call(item) if with_keys is False
if with_keys:
yield call(key, item)
else:
yield call(item)
# default helper singleton
_par_help = _RecurseHelper()
# _prior_help = _RecurseHelper()
# TODO: add Constant parameter to do stuff like p.x[:2] = Constant
# can you achieve this with masked arrays ??
# TODO: show_graph to plot graph structure between models
class Parameters(np.recarray):
"""
Array subclass that serves as a base container for (nested) parameters.
Provides natural construction routine for `numpy.recarray` from
hierarchically typed data.
"""
# object type restrictions
_allow_types = any # any means no type checking will be done upon
# initialization
def __new__(cls, data=None, base_dtype=float, **kws):
"""
Parameters
----------
kws:
(name, value) pairs for named parameters
>>> Parameters(each=1, parameter=2, painstakingly=3, named=4)
(name, sequence) pairs for sequences of named parameters
>>> Parameters(coeff=[1,2,3,4],
hyper=42)
(name, dict) pairs for nested (hierarchical) parameter structures
>>> p = Parameters(Σ=dict(x=3,
y=[7, 5, 3]),
β=1)
>>> p.β # array(1.)
>>> p.Σ.x # array(3.)
"""
# the primary objective of this class is to provide the ability to
# initialize memory from (keyword, data) pairs
if data is not None:
if isinstance(data, dict):
return cls.__new__(cls, None, base_dtype, **data)
else:
# use case: Parameters([1, 2, 3, 4, 5])
# use the `numpy.rec.array` to allow for construction from a
# wide variety of compatible objects
obj = np.rec.array(data)
return obj.view(cls) # view as Parameters object
# first we have to construct the dtype by walking the (possibly nested)
# kws that define the data structure.
dtype, size = _par_help.make_dtype(kws, base_dtype)
# print(dtype)
# construct the array
obj = super(Parameters, cls).__new__(cls, (), dtype)
# can give `titles` which are aliases for names
# keep track of the total number of parameters so we can easily
# switch between structured and unstructured views
obj.npar = size
obj.base_dtype = base_dtype
# finally, populate array with data (nested tuple)
obj[...] = _par_help.get_data(kws, allow_types=cls._allow_types)
return obj
def __reduce__(self):
# unpickling helper
return Parameters, (self.to_dict(),)
def __array_finalize__(self, obj):
#
if obj is None:
# explicit constructor eg: NestedParameters(foo=1)
# logger.debug('Explicit! ' * 3)
return
# print('bla! ' * 3)
# if we get here object constructor is view casting or new-from-template
# (slice). Set `npar` here since this method sees creation of all
# `Parameters` objects
self.npar = sum(_walk_dtype_size(obj.dtype))
self.base_dtype = getattr(obj, 'base_dtype', float)
def __getattribute__(self, key):
# hack so we don't end up with un-sized array containing single object
item = super().__getattribute__(key)
#
# note: the following block causes problems downstream when checking
# the lengths of parameter sets. It may however at some point be
# useful when construction involves single named parameters. so:
# fixme: make optional. or make sure your downstream checks use
# np.size
if isinstance(item, np.ndarray):
kls = super().__getattribute__('__class__')
if not isinstance(item, kls) and (np.size(item) == 1):
return item.item()
# if not item.dtype.fields and (np.size(item) == 1):
# return np.asscalar(item)
return item
def __getitem__(self, key):
item = super().__getitem__(key)
# hack so we don't end up with un-sized array containing single object
# print('get em!!')
if not item.dtype.fields and (np.size(item) == 1):
    return item.item()
return item
# cd /mnt/c/workspace/quantum_calc
# python3 test_tsp.py
from dimod import *
import numpy as np
import matplotlib.pyplot as plt
import random
import math
from dwave_qbsolv import QBSolv
from dwave.system.samplers import *
from dwave.system.composites import EmbeddingComposite
### Route traffic through a proxy ###
# https://qiita.com/shnchr/items/bbecc7cf984bf80aee27
# import os
# os.environ["http_proxy"] = "XXXXXX"
# os.environ["https_proxy"] = "XXXXXX"
##################
def gen_problem(size=4, is_random=False):
# Path length (cost) between location X and location Y
pos = []
d = {}
if is_random == False:
if size == 4:
d = {(0,1): 3,
(0,2): 20,
(0,3): 3,
(1,2): 5,
(1,3): 20,
(2,3): 4
}
# A -> B -> C -> D -> A is the correct answer
# The total path length in this case is 15
elif size == 5:
d = {(0,1): 3,
(0,2): 20,
(0,3): 20,
(0,4): 4,
(1,2): 2,
(1,3): 20,
(1,4): 20,
(2,3): 3,
(2,4): 20,
(3,4): 4
}
# A -> B -> C -> D -> E -> A is the correct answer
# The total path length in this case is 16
else:
for i in range(size):
# x = random.randint(0, 1000) / 100 # 0.00 ~ 10.00
# y = random.randint(0, 1000) / 100 # 0.00 ~ 10.00
x = (math.sin(math.pi * 2 * i / size) + 1) * 5
y = (math.cos(math.pi * 2 * i / size) + 1) * 5
pos.append((x, y, chr(ord('A')+i) ))
for i in range(size):
for j in range(i+1, size):
P = pos[i]
Q = pos[j]
d[(i, j)] = ( (P[0] - Q[0])**2 + (P[1] - Q[1])**2 )**0.5
return (pos, d)
def show_plots(pos, d):
[plt.plot(x, y, marker='o') for (x, y, name) in pos]
plt.show()
# Getting matplotlib to work on Ubuntu 18.04 under WSL
# http://ai-gaminglife.hatenablog.com/entry/2019/04/29/204841
#
# Install the following:
# > sudo apt install python3-tk
# > sudo apt install tk-dev
#
# On the Windows side, download VcXsrv from SourceForge.
#
# Adjust the runtime environment:
# > sudo vim ~/.bashrc
# Add the following line at the end and save:
# export DISPLAY=:0.0
def constraint_matrix(size):
# Build a matrix like the following.
# when size==3:
# [[-1, 2, 2],
# [ 0, -1, 2],
# [ 0, 0, -1]]
# when size==5:
# [[-1, 2, 2, 2, 2],
# [ 0, -1, 2, 2, 2],
# [ 0, 0, -1, 2, 2],
# [ 0, 0, 0, -1, 2],
# [ 0, 0, 0, 0, -1]]
tmp = (2 * np.triu(np.ones(size), k=1) - np.identity(size))
return tmp
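# Note (added for illustration): for a binary vector x with n ones,
# x @ constraint_matrix(size) @ x = 2*sum_{i<j} x_i*x_j - sum_i x_i = n*(n-1) - n = (n-1)**2 - 1,
# so the quadratic form is minimised (value -1) exactly when a single variable is 1,
# which is how the one-hot constraint is encoded. A quick check (hedged example):
#
#   M = constraint_matrix(4)
#   x = np.array([0, 1, 0, 0]); assert x @ M @ x == -1  # exactly one selection
#   x = np.array([0, 1, 1, 0]); assert x @ M @ x == 0   # two selections -> penalised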
def show_solve(pos, path):
xs = [pos[i][0] for i in path[1]]
ys = [pos[i][1] for i in path[1]]
xs.append(xs[0]) # to connect the last point back to the first point
ys.append(ys[0])
plt.plot(xs, ys, marker='o', linestyle='--')
plt.plot(xs[0], ys[0], color='red', marker='o', linestyle='--')
for x, y, name in pos:
plt.text(x, y, name)
plt.show()
if __name__ == "__main__":
SIZE=4
pos, d = gen_problem(size=SIZE, is_random=False)
# pos, d = gen_problem(size=SIZE, is_random=True)
print(pos)
print(d)
# Uncomment to display the graph
# show_plots(pos, d)
# Meaning of the rows and columns: [q_A0, q_B0, ..., q_D0, qA1, ..., q_C3, qD_3]
# q_Xt: binary variable indicating whether we are at location X at step t.
H = np.zeros((SIZE**2, SIZE**2))
teisuu = 0
# With a coefficient of 1 the correct answer does not come out, presumably because the penalty is too weak compared to H1??
a1 = 10
a2 = 10
# Objective function H1: minimize the total path length
H1 = np.zeros((SIZE**2, SIZE**2))
for alpha in range(SIZE):
for beta in range(SIZE):
if (alpha == beta):
continue
# print(alpha, beta)
for t in range(SIZE):
dist = 0
if (alpha < beta):
dist = d[(alpha, beta)]
else:
dist = d[(beta, alpha)]
row = alpha + SIZE*t
col = beta + (SIZE*(t+1)) % (SIZE**2)
if (row < col):
H1[row, col] += dist
else:
H1[col, row] += dist # case where we return from the last step to step 0.
# Needed to keep the lower triangle of the QUBO matrix all zeros.
print("H1:\n", H1, "\n")
H += H1
tmp = constraint_matrix(SIZE)
# Objective function H2: visit exactly one location at each time step
H2 = np.zeros((SIZE**2, SIZE**2))
for m in range(SIZE):
for i in range(SIZE):
for j in range(SIZE):
H2[i + SIZE*m, j + SIZE*m] += tmp[i, j] * a1
print("H2:\n", H2, "\n")
H += H2
teisuu += a1
# Objective function H3: visit each location exactly once
H3 = np.zeros((SIZE**2, SIZE**2))
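# (Added sketch -- an assumption, since the construction of H3 is cut off in this file.)
# By symmetry with H2, H3 would likely apply the same one-hot penalty across time steps
# for each location (variable index = location + SIZE*t), along the lines of:
#
# for m in range(SIZE):
#     for i in range(SIZE):
#         for j in range(SIZE):
#             H3[m + SIZE*i, m + SIZE*j] += tmp[i, j] * a2
# print("H3:\n", H3, "\n")
# H += H3
# teisuu += a2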
import numpy as np
import matplotlib.pyplot as plt
from typing import Tuple
def rand_orientation_mat(
k: int, p: int, xi: int, seed: None
) -> Tuple[np.ndarray, np.ndarray]:
"""Generates a random orientation matrix B*
Arguments:
k {int} -- grid dimension (how many dimensions grid has)
p {int} -- grid level (how many points in grid)
xi {int} -- elementary effect step size
Returns:
Bs {np.ndarray} -- Random orientation matrix B*
Ps {np.ndarray} -- Matrix indicating which vars change
"""
if seed != None:
np.random.seed(seed)
m = k + 1
delta = xi / (p - 1)
v = np.random.choice([-1, 1], size=k, p=[0.5, 0.5])
Ds = np.diag(v)
J = np.ones((m, k))
B = np.tril(J, k=-1)
sv = np.arange(0, (1 - delta) + 1 / (p - 1), 1 / (p - 1))
xs = np.random.choice(sv, size=k, p=np.ones((len(sv),)) / len(sv))[None, :]
ind = np.arange(k)
np.random.shuffle(ind)
Ps = np.eye(k)[:, ind]
Bs = (J[:, 0][:, None] * xs + (delta / 2) * ((2 * B - J) @ Ds + J)) @ Ps
return Bs, Ps
def sampling_matrix(
k: int, p: int, xi: int, r: int, seed=None
) -> Tuple[np.ndarray, np.ndarray]:
"""Generates a sampling matrix X consisting of r random
orientation matrices B*
Arguments:
k {int} -- grid dimension (how many dimensions grid has)
p {int} -- grid level (how many points in grid)
xi {int} -- elementary effect step size
r {int} -- number of elementary effects
Returns:
X {np.ndarray} -- r Random orientation matrices B* row concatenated
P {np.ndarray} -- Matrix indicating which vars change
"""
X = np.zeros(((k + 1) * r, k))
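# (Added sketch -- the remainder of this function is not present in this file.)
# A plausible continuation, assuming the r orientation matrices are stacked row-wise
# (names below are illustrative, not from the original source):
#
# P = np.zeros(((k + 1) * r, k))
# for i in range(r):
#     Bs, Ps = rand_orientation_mat(k, p, xi, seed)  # note: a fixed seed would repeat the same B*;
#                                                    # the real implementation may vary the seed per repetition
#     X[i * (k + 1):(i + 1) * (k + 1), :] = Bs
#     P[i * (k + 1):(i + 1) * (k + 1), :] = Ps
# return X, P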
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Making Output for the Multiresolution Network
#
# The network definition is stored in the module **MultiResSmallNetwork** in the directory src/models/
#
# In this Notebook, we want to generate the output for a part of an WSI at 5x. We start by specifying some global variables.
# ## Specifying the configuration
# +
# The path to the image from which we want to make the output.
fpath = r'/home/ltran/data/test/slide1.png'
# The path to the model and the associated network
modelpath = r"/home/ltran/trg_ltran/models/multi/multires-8classes-v0.pth"
n_classes =8
class_names=["Fat", "Muscular", "Vessle", "Gland", "Stroma", "Tumor", "Necrosis", "Epithelium"]
#class_names = ["Fat", "Muscular", "Vessle", "Gland", "Stroma", "Tumor", "Epithelium"]
modulepath = '/home/ltran/trg_ltran/'
nclasses=len(class_names)
# resize, padsize, patch_size and batch_size used for the model
resize_resolutions=[1,.25] #resize input image, base and second
patch_size=64
patch_size_res2 = 128
mirror_pad_size = patch_size_res2
batch_size=128 #should be a power of 2
# number of initialized SLIC segments.
n_segments = 200000//(64*64//600)
# -
# ## Import the necessary modules
import sys
sys.path.insert(0,modulepath)
# +
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
import scipy.signal
import argparse
from torch import nn
from torchsummary import summary
from skimage import color
from albumentations import *
from albumentations.pytorch import ToTensor
import sklearn.feature_extraction.image
import matplotlib.cm
import torch
#We also import the modules in /src/
from src.models.MultiResSmallNetwork import MergeSmallNetworks
from src.output.helpers import Preprocess, CentersSLIC, Intersection, MakeOutput, OutputMasks
from tqdm.autonotebook import tqdm
from skimage.color import rgb2gray
import PIL
import glob
import dill as pickle
from skimage.color import rgb2gray, rgb2hed
from skimage.measure import *
from skimage.filters import *
from skimage.morphology import *
import time
# -
def Preprocess(img, resize, mirror_pad_size, patch_size_res2):
img= cv2.resize(img,(0,0),fx=resize,fy=resize, interpolation=PIL.Image.BICUBIC) #resize it as specified above
img = np.pad(img, [(mirror_pad_size, mirror_pad_size), (mirror_pad_size, mirror_pad_size), (0, 0)], mode="reflect")
#create the coresponding mask by using hematoxylin
#hed=rgb2hed(img)
mask=img[:, :, 2] < 241
# remove the region near the edge
mask[0:patch_size_res2,:]=0
mask[:,0:patch_size_res2]=0
mask[:,-patch_size_res2-1:]=0
mask[-patch_size_res2-1:,:]=0
mask=remove_small_objects(mask,100)
mask = remove_small_holes(mask, 500)
mask[img.sum(axis=2)<100]=0
mask[img.sum(axis=2)>700]=0
return img, mask
# ## Preprocess
# We first import the image, then preprocess it. This procedure includes making the corresponding mask with some margin from the edges.
#import the image and make the mask using Preprocess helper function
img = cv2.cvtColor(cv2.imread(fpath),cv2.COLOR_BGR2RGB)
img, mask = Preprocess(img, resize_resolutions[0], mirror_pad_size, patch_size_res2)
mask = remove_small_holes(mask, 15000)
plt.imshow(mask)
# +
#plt.savefig('./Output/mask.png', dpi = 500)
# -
fig = plt.figure()
plt.imshow(img)
#plt.savefig('./Output/img.png', dpi = 500)
# ## SLIC - a simple segmentation to obtain super pixels.
#
# We next use SLIC to obtain superpixels. The goal is to obtain the superpixel centers on which we will run our model to predict the class.
# SLIC label
from skimage.segmentation import slic
from skimage.segmentation import mark_boundaries
start_time = time.time()
segments_slic = slic(img, n_segments=n_segments, compactness=10, sigma=1, enforce_connectivity=True)
regions = regionprops(segments_slic)
print(f"SLIC number of segments: {len(np.unique(segments_slic))}")
end_time = time.time()
print('Time:', end_time - start_time)
# %matplotlib inline
#plt.rcParams["figure.figsize"] = [10, 20]
fig, ax = plt.subplots()
print(f"SLIC number of segments: {len( | np.unique(segments_slic) | numpy.unique |
from __future__ import absolute_import, division, print_function, \
unicode_literals
import os
from netCDF4 import Dataset as netcdf_dataset
import numpy as np
import numpy.ma as ma
import xarray as xr
import glob
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as cols
import matplotlib.animation as animation
from matplotlib.pyplot import cm
from matplotlib.colors import from_levels_and_colors
from matplotlib.colors import BoundaryNorm
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.ticker as mticker
import cmocean
import time
from mpas_analysis.ocean.utility import compute_zmid
def _add_land_lakes_coastline(ax):
land_50m = cfeature.NaturalEarthFeature(
'physical', 'land', '50m', edgecolor='face',
facecolor='lightgray', linewidth=0.5)
lakes_50m = cfeature.NaturalEarthFeature(
'physical', 'lakes', '50m', edgecolor='k',
facecolor='aliceblue',
linewidth=0.5)
coast_50m = cfeature.NaturalEarthFeature(
'physical', 'coastline', '50m', edgecolor='k',
facecolor='None', linewidth=0.5)
ax.add_feature(land_50m, zorder=2)
ax.add_feature(lakes_50m, zorder=3)
ax.add_feature(coast_50m, zorder=4)
meshfile = '/compyfs/inputdata/ocn/mpas-o/EC30to60E2r2/ocean.EC30to60E2r2.200908.nc'
#
runname = '20201030.alpha5_v1p-1_target.piControl.ne30pg2_r05_EC30to60E2r2-1900_ICG.compy'
modeldir = '/compyfs/malt823/E3SM_simulations/{}/archive/ocn/hist'.format(runname)
#runname = '20201022.alpha5_v1p-1_fallback.piControl.ne30pg2_r05_EC30to60E2r2-1900_ICG.compy'
#modeldir = '/compyfs/gola749/E3SM_simulations/{}/archive/ocn/hist'.format(runname)
#
#infiles = sorted(glob.glob('{}/{}.mpaso.hist.am.timeSeriesStatsMonthly.00*'.format(modeldir, runname)))[0]
infiles = sorted(glob.glob('{}/{}.mpaso.hist.am.timeSeriesStatsMonthly.00*'.format(modeldir, runname)))[0:600]
#infiles = sorted(glob.glob('{}/{}.mpaso.hist.am.timeSeriesStatsMonthly.00*'.format(modeldir, runname)))
print('\ninfiles={}\n'.format(infiles))
#variable = 'temperatureSurfaceFluxTendency'
#variable = 'temperatureShortWaveTendency'
#variable = 'temperatureHorizontalAdvectionTendency'
#variable = 'temperatureVerticalAdvectionTendency'
#variable = 'temperatureHorMixTendency'
#variable = 'temperatureVertMixTendency'
#variable = 'temperatureNonLocalTendency'
#variable = 'temperature'
#variable = 'temperatureTotalAdvectionTendency' # derived variable
#variable = 'temperatureVertMixLocalNonlocalTendency' # derived variable
variable = 'temperatureForcingTendency' # derived variable
#variable = 'temperatureSumTendencyTerms' # derived variable
#variable = 'temperatureTendency' # derived variable
figdir = './animations_xymaps'
if not os.path.isdir(figdir):
os.makedirs(figdir)
figsize = [16, 12]
figdpi = 100
# zmin,zmax over which to average
#zmin = -200.
#zmax = 0.
zmin = -1000.
zmax = -200.
#zmin = -6000.
#zmax = -1000.
colorIndices0 = [0, 10, 28, 57, 85, 113, 142, 170, 198, 227, 242, 255]
variables = [{'name': 'temperatureSurfaceFluxTendency',
'title': 'Surface flux tendency for temperature',
'units': '$^\circ$C/s (x1e-6)',
'mpas': 'timeMonthly_avg_activeTracerSurfaceFluxTendency_temperatureSurfaceFluxTendency',
'factor': 1e6,
'colormap': plt.get_cmap('RdBu_r'),
'clevels': [-4, -3, -2, -1, -0.5, 0.0, 0.5, 1, 2, 3, 4],
'plot_anomalies': False},
{'name': 'temperatureShortWaveTendency',
'title': 'Penetrating shortwave flux tendency for temperature',
'units': '$^\circ$C/s (x1e-6)',
'mpas': 'timeMonthly_avg_temperatureShortWaveTendency',
'factor': 1e6,
'colormap': plt.get_cmap('RdBu_r'),
'clevels': [-4, -3, -2, -1, -0.5, 0.0, 0.5, 1, 2, 3, 4],
'plot_anomalies': False},
{'name': 'temperatureForcingTendency',
'title': 'Total forcing tendency for temperature',
'units': '$^\circ$C/s (x1e-6)',
'mpas': None,
'factor': 1e6,
'colormap': plt.get_cmap('RdBu_r'),
#'clevels': [-2, -1.5, -1.0, -0.5, -0.1, 0.0, 0.1, 0.5, 1, 1.5, 2], # 0-200 m
'clevels': [-0.2, -0.15, -0.1, -0.05, -0.01, 0.0, 0.01, 0.05, 0.1, 0.15, 0.2], # 200-1000 m
#'clevels': [-4, -3, -2, -1, -0.5, 0.0, 0.5, 1, 2, 3, 4],
'plot_anomalies': False},
{'name': 'temperatureHorizontalAdvectionTendency',
'title': 'Horizontal advection tendency for temperature',
'units': '$^\circ$C/s (x1e-6)',
'mpas': 'timeMonthly_avg_activeTracerHorizontalAdvectionTendency_temperatureHorizontalAdvectionTendency',
'factor': 1e6,
'colormap': plt.get_cmap('RdBu_r'),
'clevels': [-4, -3, -2, -1, -0.5, 0.0, 0.5, 1, 2, 3, 4],
'plot_anomalies': False},
{'name': 'temperatureVerticalAdvectionTendency',
'title': 'Vertical advection tendency for temperature',
'units': '$^\circ$C/s (x1e-6)',
'mpas': 'timeMonthly_avg_activeTracerVerticalAdvectionTendency_temperatureVerticalAdvectionTendency',
'factor': 1e6,
'colormap': plt.get_cmap('RdBu_r'),
'clevels': [-4, -3, -2, -1, -0.5, 0.0, 0.5, 1, 2, 3, 4],
'plot_anomalies': False},
{'name': 'temperatureTotalAdvectionTendency',
'title': 'Total advection tendency for temperature',
'units': '$^\circ$C/s (x1e-6)',
'mpas': None,
'factor': 1e6,
'colormap': plt.get_cmap('RdBu_r'),
#'clevels': [-2, -1.5, -1.0, -0.5, -0.1, 0.0, 0.1, 0.5, 1, 1.5, 2], # 0-200 m
'clevels': [-0.2, -0.15, -0.1, -0.05, -0.01, 0.0, 0.01, 0.05, 0.1, 0.15, 0.2], # 200-1000 m
#'clevels': [-4, -3, -2, -1, -0.5, 0.0, 0.5, 1, 2, 3, 4],
'plot_anomalies': False},
{'name': 'temperatureHorMixTendency',
'title': 'Horizontal mixing tendency for temperature',
'units': '$^\circ$C/s (x1e-6)',
'mpas': 'timeMonthly_avg_activeTracerHorMixTendency_temperatureHorMixTendency',
'factor': 1e6,
'colormap': plt.get_cmap('RdBu_r'),
'clevels': [-4, -3, -2, -1, -0.5, 0.0, 0.5, 1, 2, 3, 4],
'plot_anomalies': False},
{'name': 'temperatureVertMixTendency',
'title': 'Vertical mixing tendency for temperature',
'units': '$^\circ$C/s (x1e-6)',
'mpas': 'timeMonthly_avg_activeTracerVertMixTendency_temperatureVertMixTendency',
'factor': 1e6,
'colormap': plt.get_cmap('RdBu_r'),
'clevels': [-4, -3, -2, -1, -0.5, 0.0, 0.5, 1, 2, 3, 4],
'plot_anomalies': False},
{'name': 'temperatureNonLocalTendency',
'title': 'Non-local kpp flux tendency for temperature',
'units': '$^\circ$C/s (x1e-6)',
'mpas': 'timeMonthly_avg_activeTracerNonLocalTendency_temperatureNonLocalTendency',
'factor': 1e6,
'colormap': plt.get_cmap('RdBu_r'),
'clevels': [-4, -3, -2, -1, -0.5, 0.0, 0.5, 1, 2, 3, 4],
'plot_anomalies': False},
{'name': 'temperatureVertMixLocalNonlocalTendency',
'title': 'Sum of local and non-local kpp mixing tendency for temperature',
'units': '$^\circ$C/s (x1e-6)',
'mpas': None,
'factor': 1e6,
'colormap': plt.get_cmap('RdBu_r'),
'clevels': [-0.2, -0.15, -0.1, -0.05, -0.01, 0.0, 0.01, 0.05, 0.1, 0.15, 0.2],
#'clevels': [-4, -3, -2, -1, -0.5, 0.0, 0.5, 1, 2, 3, 4],
'plot_anomalies': False},
{'name': 'temperatureSumTendencyTerms',
'title': 'Sum of all tendency terms for temperature',
'units': '$^\circ$C/s (x1e-6)',
'mpas': None,
'factor': 1e6,
'colormap': plt.get_cmap('RdBu_r'),
#'clevels': [-2, -1.5, -1.0, -0.5, -0.1, 0.0, 0.1, 0.5, 1, 1.5, 2], # 0-200 m
'clevels': [-0.2, -0.15, -0.1, -0.05, -0.01, 0.0, 0.01, 0.05, 0.1, 0.15, 0.2], # 200-1000 m
#'clevels': [-4, -3, -2, -1, -0.5, 0.0, 0.5, 1, 2, 3, 4],
'plot_anomalies': False},
{'name': 'temperatureTendency',
'title': 'Temperature tendency (derived)',
'units': '$^\circ$C/s (x1e-6)',
'mpas': 'timeMonthly_avg_activeTracers_temperature',
'factor': 1e6,
'colormap': plt.get_cmap('RdBu_r'),
'clevels': [-0.2, -0.15, -0.1, -0.05, -0.01, 0.0, 0.01, 0.05, 0.1, 0.15, 0.2],
#'clevels': [-4, -3, -2, -1, -0.5, 0.0, 0.5, 1, 2, 3, 4],
'plot_anomalies': False},
{'name': 'temperature',
'title': 'Temperature',
'units': '$^\circ$C',
'mpas': 'timeMonthly_avg_activeTracers_temperature',
'factor': 1,
#'colormap': plt.get_cmap('RdBu_r'),
#'clevels': [-1.8, -1.0, -0.5, 0.0, 0.5, 2.0, 4.0, 8.0, 12., 16., 22.],
'colormap': cmocean.cm.balance,
'clevels': [-2.0, -1.5, -1.0, -0.5, -0.1, 0.0, 0.1, 0.5, 1.0, 1.5, 2.0],
'plot_anomalies': True},
{'name': 'salinity',
'title': 'Salinity',
'units': 'PSU',
'mpas': 'timeMonthly_avg_activeTracers_salinity',
'factor': 1,
'colormap': cmocean.cm.haline,
'clevels': [27., 28., 29., 29.5, 30., 30.5, 31., 32., 33., 34., 35.],
'plot_anomalies': True},
{'name': 'potentialDensity',
'title': 'Potential Density',
'units': 'kg m$^{-3}$',
'mpas': 'timeMonthly_avg_potentialDensity',
'factor': 1,
'colormap': cmocean.cm.dense,
'clevels': [24., 25.5, 25.9, 26.2, 26.5, 26.7, 26.8, 26.85, 26.9, 27.1, 27.75],
'plot_anomalies': True}]
# Identify dictionary for desired variable
vardict = next(item for item in variables if item['name'] == variable)
varname = vardict['name']
mpasvarname = vardict['mpas']
factor = vardict['factor']
plot_anomalies = vardict['plot_anomalies']
vartitle = vardict['title']
varunits = vardict['units']
clevels = vardict['clevels']
colormap0 = vardict['colormap']
underColor = colormap0(colorIndices0[0])
overColor = colormap0(colorIndices0[-1])
if len(clevels) + 1 == len(colorIndices0):
# we have 2 extra values for the under/over so make the colormap
# without these values
colorIndices = colorIndices0[1:-1]
elif len(clevels) - 1 != len(colorIndices0):
# indices list must be either one element shorter
# or one element longer than colorbarLevels list
    raise ValueError('length mismatch between indices and '
                     'colorbarLevels')
else:
    # exactly one more color level than indices: use the full index list
    colorIndices = colorIndices0
colormap = cols.ListedColormap(colormap0(colorIndices))
colormap.set_under(underColor)
colormap.set_over(overColor)
cnorm = cols.BoundaryNorm(clevels, colormap.N)
tic = time.perf_counter()
mesh = xr.open_dataset(meshfile)
lat = mesh.latCell.values
lon = mesh.lonCell.values
weights = np.cos(lat)
lat = np.rad2deg(lat)
lon = np.rad2deg(lon)
zMid = compute_zmid(mesh.bottomDepth, mesh.maxLevelCell, mesh.layerThickness).squeeze()
depthMask = np.logical_and(zMid >= zmin, zMid <= zmax)
depthMask.compute()
ds = xr.open_mfdataset(infiles, combine='nested', concat_dim='Time')
ds['depthMask'] = depthMask
layerThickness = ds.timeMonthly_avg_layerThickness.where(depthMask)
layerThicknessSum = layerThickness.sum(dim='nVertLevels')
ntime = ds.dims['Time']
toc = time.perf_counter()
print('\nReading data done in {:0.4f} seconds'.format(toc-tic))
if plot_anomalies:
figtitle0 = 'Anomaly'
else:
figtitle0 = ''
figfile = '{}/{}{}_depths{:04d}-{:04d}_{}.mp4'.format(figdir, varname,
    figtitle0, int(np.abs(zmax)), int(np.abs(zmin)), runname)
figtitle0 = '{} {} (z={:d}-{:d} m)'.format(vartitle, figtitle0,
                                           int(np.abs(zmax)),
                                           int(np.abs(zmin)))
tic = time.perf_counter()
if varname=='temperatureTotalAdvectionTendency':
mpasvarname1 = 'timeMonthly_avg_activeTracerHorizontalAdvectionTendency_temperatureHorizontalAdvectionTendency'
mpasvarname2 = 'timeMonthly_avg_activeTracerVerticalAdvectionTendency_temperatureVerticalAdvectionTendency'
fld = ds[mpasvarname1] + ds[mpasvarname2]
elif varname=='temperatureVertMixLocalNonlocalTendency':
mpasvarname1 = 'timeMonthly_avg_activeTracerVertMixTendency_temperatureVertMixTendency'
mpasvarname2 = 'timeMonthly_avg_activeTracerNonLocalTendency_temperatureNonLocalTendency'
fld = ds[mpasvarname1] + ds[mpasvarname2]
elif varname=='temperatureForcingTendency':
mpasvarname1 = 'timeMonthly_avg_activeTracerSurfaceFluxTendency_temperatureSurfaceFluxTendency'
mpasvarname2 = 'timeMonthly_avg_temperatureShortWaveTendency'
fld = ds[mpasvarname1] + ds[mpasvarname2]
elif varname=='temperatureTendency':
temp = ds[mpasvarname]
first = np.nan*xr.ones_like(temp.isel(Time=0))
first = first.expand_dims('Time')
fld = temp.diff(dim='Time')/(30.4375*86400.) # assumes monthly values
fld = xr.concat([first, fld], dim='Time')
elif varname=='temperatureSumTendencyTerms':
mpasvarname1 = 'timeMonthly_avg_activeTracerSurfaceFluxTendency_temperatureSurfaceFluxTendency'
mpasvarname2 = 'timeMonthly_avg_temperatureShortWaveTendency'
mpasvarname3 = 'timeMonthly_avg_activeTracerHorizontalAdvectionTendency_temperatureHorizontalAdvectionTendency'
mpasvarname4 = 'timeMonthly_avg_activeTracerVerticalAdvectionTendency_temperatureVerticalAdvectionTendency'
mpasvarname5 = 'timeMonthly_avg_activeTracerHorMixTendency_temperatureHorMixTendency'
mpasvarname6 = 'timeMonthly_avg_activeTracerVertMixTendency_temperatureVertMixTendency'
mpasvarname7 = 'timeMonthly_avg_activeTracerNonLocalTendency_temperatureNonLocalTendency'
fld = ds[mpasvarname1] + ds[mpasvarname2] + ds[mpasvarname3] + ds[mpasvarname4] + \
ds[mpasvarname5] + ds[mpasvarname6] + ds[mpasvarname7]
else:
fld = ds[mpasvarname]
fld = (fld.where(depthMask)*layerThickness).sum(dim='nVertLevels') / layerThicknessSum
fld = factor*fld.values
if plot_anomalies:
fld = fld - fld[0, :]
print(varname, np.nanmin(fld), np.nanmax(fld))
toc = time.perf_counter()
print('\nDefining fld done in {:0.4f} seconds'.format(toc-tic))
tic = time.perf_counter()
#mean = np.average(fld, axis=1, weights=np.squeeze(weights))
#std = np.sqrt(np.average((fld - mean[:, np.newaxis])**2, axis=1, weights=np.squeeze(weights)))
mean = | np.nansum(fld*weights[np.newaxis, :], axis=1) | numpy.nansum |
#!/usr/bin/env python
#
# 20190222
# copied from "calc_stellar_mass_function.py", this code will superceed "calc_stellar_mass_function.py".
#
from __future__ import print_function
import os, sys, re, json, time, astropy
import numpy as np
from astropy.table import Table, Column, hstack
from copy import copy
from numpy import log, log10, power, sum, sqrt, pi, exp
pow = power
lg = log10
ln = log
from scipy.interpolate import InterpolatedUnivariateSpline, interp1d
if not (os.path.dirname(os.path.abspath(__file__)) in sys.path): sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import apply_cosmology
cosmo = apply_cosmology.cosmo
if sys.version_info.major >= 3:
long = int
else:
pass
#
# def
#
def Schechter_Function(lgM, phi, lg_M0, alpha):
#
# Schechter (1976)
#
# Phi(M) dM = (Phi_*) * (M/M_*)**(alpha) * exp(-M/M_*) dM/M_*
# = (Phi_*) * x**(alpha) * exp(-x) dx
# = (Phi_*) * 10**(lgx * alpha) * exp(-10**lgx) dx
# = (Phi_*) * 10**(lgx * alpha) * exp(-10**lgx) dlnx
# = (Phi_*) * 10**(lgx * alpha) * exp(-10**lgx) dlgx * ln(10)
# = (Phi_*) * 10**((lgM-lgM_*)*(alpha+1)) * exp(-10**(lgM-lgM_*)) * ln(10) dlgx
# = (Our_Phi_Phi_Schechter) dlgx
#
lgx = lgM-lg_M0
Phi_Schechter = phi * (10**(lgx*(alpha+1))) * (np.exp(-10**lgx)) * ln(10) # per dex and already multiplied ln(10), so that its integral directly equals \int Phi(M) / M dM
return Phi_Schechter
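# Minimal usage sketch (illustrative Schechter parameters only, not taken from
# any survey): evaluate Phi(lgM) per dex on a stellar-mass grid.
def _example_Schechter_usage():
    lgM = np.linspace(8.0, 12.5, num=100)
    Phi = Schechter_Function(lgM, phi=1.0e-3, lg_M0=10.8, alpha=-1.3)
    return lgM, Phi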
#
# def
#
def calc_SMF_Davidzon2017(z, lgMstar=None, galaxy_type = 'SFG'):
#
# Davidzon 2017 arXiv:1701.02734
# IMF: Chabrier 2003
# Outputs: lgMstar_grid, lgPhiMstar_grid
#
# check z
if not np.isscalar(z):
raise ValueError('Please input a float number as the redshift!')
#
# check galaxy_type
if not (type(galaxy_type) is str):
raise ValueError('Please input either "ALL", "SFG" or "QG" as the galaxy_type!')
else:
if not (galaxy_type in ['ALL', 'SFG', 'QG']):
raise ValueError('Please input either "ALL", "SFG" or "QG" as the galaxy_type!')
#
# make lgMstar
if lgMstar is None:
lgMstar_grid = np.linspace(6.0, 13.0, num=1000, endpoint=True)
else:
lgMstar_grid = lgMstar
#
# read SMF
tb_SMF = Table.read(os.path.dirname(os.path.dirname(__file__))+os.sep+'Data_Tables/datatables_SMF/datatable_Davidzon2017_SMF_'+galaxy_type+'.txt', format='ascii') # zLo zHi lgMchar Phi_1 alpha_1 Phi_2 alpha_2
SMF_zmin = np.min(tb_SMF['zLo'])
SMF_zmax = np.max(tb_SMF['zHi'])
#
# check z
if z < SMF_zmin or z > SMF_zmax:
raise ValueError('calc_SMF_Davidzon2017: The input redshift is out of the allowed range of %s -- %s!'%(SMF_zmin, SMF_zmax))
#
# spline SMF #<20190214># old method
#<20190214># SMF_z = (tb_SMF['zLo'].data + tb_SMF['zHi'].data) / 2.0
#<20190214># SMF_phi_1 = InterpolatedUnivariateSpline(SMF_z, tb_SMF['Phi_1'].data, k=1)(z)
#<20190214># SMF_phi_2 = InterpolatedUnivariateSpline(SMF_z, tb_SMF['Phi_2'].data, k=1)(z)
#<20190214># SMF_alpha_1 = InterpolatedUnivariateSpline(SMF_z, tb_SMF['alpha_1'].data, k=1)(z)
#<20190214># SMF_alpha_2 = InterpolatedUnivariateSpline(SMF_z, tb_SMF['alpha_2'].data, k=1)(z)
#<20190214># SMF_lgMchar = InterpolatedUnivariateSpline(SMF_z, tb_SMF['lgMchar'].data, k=1)(z)
#<20190214># #print('z, lgMchar, alpha_1, phi_1, alpha_2, phi_2 =', z, SMF_lgMchar, SMF_alpha_1, SMF_phi_1, SMF_alpha_2, SMF_phi_2)
#<20190214># SMF_PhiMstar = Schechter_Function(lgMstar_grid, SMF_phi_1, SMF_lgMchar, SMF_alpha_1) + \
#<20190214># Schechter_Function(lgMstar_grid, SMF_phi_2, SMF_lgMchar, SMF_alpha_2) # two component
#<20190214># lgPhiMstar_grid = np.log10(SMF_PhiMstar)
#
# spline SMF
lgPhiMstar_matrix = []
for k in range(len(tb_SMF)):
SMF_z = (tb_SMF['zLo'][k] + tb_SMF['zHi'][k]) / 2.0
SMF_phi_1 = tb_SMF['Phi_1'][k]
SMF_phi_2 = tb_SMF['Phi_2'][k]
SMF_alpha_1 = tb_SMF['alpha_1'][k]
SMF_alpha_2 = tb_SMF['alpha_2'][k]
SMF_lgMchar = tb_SMF['lgMchar'][k]
#print('z, lgMchar, alpha_1, phi_1, alpha_2, phi_2 =', z, SMF_lgMchar, SMF_alpha_1, SMF_phi_1, SMF_alpha_2, SMF_phi_2)
SMF_PhiMstar = Schechter_Function(lgMstar_grid, SMF_phi_1, SMF_lgMchar, SMF_alpha_1) + \
Schechter_Function(lgMstar_grid, SMF_phi_2, SMF_lgMchar, SMF_alpha_2) # two component
lgPhiMstar_grid = np.log10(SMF_PhiMstar)
lgPhiMstar_matrix.append(copy(lgPhiMstar_grid))
#
SMF_z = (tb_SMF['zLo'].data + tb_SMF['zHi'].data) / 2.0
lgPhiMstar_matrix = np.array(lgPhiMstar_matrix) # shape == (N_SMF_z, N_SMF_lgMstar, )
if z <= np.min(SMF_z):
lgPhiMstar_grid = lgPhiMstar_matrix[0]
elif z >= np.max(SMF_z):
lgPhiMstar_grid = lgPhiMstar_matrix[-1]
else:
lgPhiMstar_grid = interp1d(SMF_z, lgPhiMstar_matrix, axis=0, kind='linear')(z)
#print(lgPhiMstar_matrix.shape, SMF_z.shape, lgPhiMstar_grid.shape)
# fix nan
lgPhiMstar_grid[np.isnan(lgPhiMstar_grid)] = -100
lgPhiMstar_grid[(lgPhiMstar_grid<-100)] = -100
#
if lgMstar is None:
return lgMstar_grid, lgPhiMstar_grid
else:
return lgPhiMstar_grid
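# Usage sketch (requires the Data_Tables files shipped with this module; the
# redshift and masses below are illustrative only):
def _example_calc_SMF_Davidzon2017_usage():
    lgM_grid, lgPhi_grid = calc_SMF_Davidzon2017(z=1.0, galaxy_type='SFG')
    lgPhi_at = calc_SMF_Davidzon2017(z=1.0, lgMstar=np.array([9.0, 10.0, 11.0]), galaxy_type='SFG')
    return lgM_grid, lgPhi_grid, lgPhi_at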
def calc_SMF_Moutard2016(z, lgMstar=None, galaxy_type = 'SFG'):
#
    # Moutard et al. 2016 - SMF - https://ui.adsabs.harvard.edu/abs/2016A%26A...590A.103M/abstract
# IMF: Chabrier 2003
# Outputs: lgMstar_grid, lgPhiMstar_grid
#
# check z
if not np.isscalar(z):
raise ValueError('Please input a float number as the redshift!')
#
# check galaxy_type
if not (type(galaxy_type) is str):
raise ValueError('Please input either "ALL", "SFG" or "QG" as the galaxy_type!')
else:
if not (galaxy_type in ['ALL', 'SFG', 'QG']):
raise ValueError('Please input either "ALL", "SFG" or "QG" as the galaxy_type!')
#
# make lgMstar
if lgMstar is None:
lgMstar_grid = np.linspace(6.0, 13.0, num=1000, endpoint=True)
else:
lgMstar_grid = lgMstar
#
# read SMF
tb_SMF = Table.read(os.path.dirname(os.path.dirname(__file__))+os.sep+'Data_Tables/datatables_SMF/datatable_Moutard2016_SMF_'+galaxy_type+'.txt', format='ascii') # zLo zHi lgMchar Phi_1 alpha_1 Phi_2 alpha_2
SMF_zmin = np.min(tb_SMF['zLo'])
SMF_zmax = np.max(tb_SMF['zHi'])
#
# check z
if z < SMF_zmin or z > SMF_zmax:
raise ValueError('calc_SMF_Moutard2016: The input redshift is out of the allowed range of %s -- %s!'%(SMF_zmin, SMF_zmax))
#
# spline SMF
lgPhiMstar_matrix = []
for k in range(len(tb_SMF)):
SMF_z = (tb_SMF['zLo'][k] + tb_SMF['zHi'][k]) / 2.0
SMF_phi_1 = tb_SMF['Phi_1'][k]
SMF_phi_2 = tb_SMF['Phi_2'][k]
SMF_alpha_1 = tb_SMF['alpha_1'][k]
SMF_alpha_2 = tb_SMF['alpha_2'][k]
SMF_lgMchar = tb_SMF['lgMchar'][k]
#print('calc_SMF_Moutard2016: z %r, lgMchar %r, alpha_1 %r, phi_1 %r, alpha_2 %r, phi_2 %r'%(z, SMF_lgMchar, SMF_alpha_1, SMF_phi_1, SMF_alpha_2, SMF_phi_2))
SMF_PhiMstar = Schechter_Function(lgMstar_grid, SMF_phi_1, SMF_lgMchar, SMF_alpha_1) + \
Schechter_Function(lgMstar_grid, SMF_phi_2, SMF_lgMchar, SMF_alpha_2) # two component
lgPhiMstar_grid = np.log10(SMF_PhiMstar)
lgPhiMstar_matrix.append(copy(lgPhiMstar_grid))
#
SMF_z = (tb_SMF['zLo'].data + tb_SMF['zHi'].data) / 2.0
lgPhiMstar_matrix = np.array(lgPhiMstar_matrix) # shape == (N_SMF_z, N_SMF_lgMstar, )
if z <= np.min(SMF_z):
lgPhiMstar_grid = lgPhiMstar_matrix[0]
elif z >= np.max(SMF_z):
lgPhiMstar_grid = lgPhiMstar_matrix[-1]
else:
lgPhiMstar_grid = interp1d(SMF_z, lgPhiMstar_matrix, axis=0, kind='linear')(z)
#print(lgPhiMstar_matrix.shape, SMF_z.shape, lgPhiMstar_grid.shape)
# fix nan
lgPhiMstar_grid[np.isnan(lgPhiMstar_grid)] = -100
lgPhiMstar_grid[(lgPhiMstar_grid<-100)] = -100
#
if lgMstar is None:
return lgMstar_grid, lgPhiMstar_grid
else:
return lgPhiMstar_grid
def calc_SMF_Ilbert2013(z, lgMstar=None, galaxy_type = 'SFG'):
#
# Ilbert 2013
# IMF: Chabrier 2003
# Outputs: lgMstar_grid, lgPhiMstar_grid
#
# check z
if not np.isscalar(z):
raise ValueError('Please input a float number as the redshift!')
#
# check galaxy_type
    if not (type(galaxy_type) is str):
        raise ValueError('Please input either "ALL", "SFG" or "QG" as the galaxy_type!')
    else:
        if not (galaxy_type in ['ALL', 'SFG', 'QG']):
            raise ValueError('Please input either "ALL", "SFG" or "QG" as the galaxy_type!')
#
# make lgMstar
if lgMstar is None:
lgMstar_grid = np.linspace(6.0, 13.0, num=1000, endpoint=True)
else:
lgMstar_grid = lgMstar
#
# read SMF
tb_SMF = Table.read(os.path.dirname(os.path.dirname(__file__))+os.sep+'Data_Tables/datatables_SMF/datatable_Ilbert2013_SMF_'+galaxy_type+'.txt', format='ascii') # zLo zHi lgMchar Phi_1 alpha_1 Phi_2 alpha_2
SMF_zmin = np.min(tb_SMF['zLo'])
SMF_zmax = np.max(tb_SMF['zHi'])
#
# check z
if z < SMF_zmin or z > SMF_zmax:
raise ValueError('calc_SMF_Ilbert2013: The input redshift is out of the allowed range of %s -- %s!'%(SMF_zmin, SMF_zmax))
#
# spline SMF
lgPhiMstar_matrix = []
for k in range(len(tb_SMF)):
SMF_z = (tb_SMF['zLo'][k] + tb_SMF['zHi'][k]) / 2.0
SMF_phi_1 = tb_SMF['Phi_1'][k]
SMF_phi_2 = tb_SMF['Phi_2'][k]
SMF_alpha_1 = tb_SMF['alpha_1'][k]
SMF_alpha_2 = tb_SMF['alpha_2'][k]
SMF_lgMchar = tb_SMF['lgMchar'][k]
#print('z, lgMchar, alpha_1, phi_1, alpha_2, phi_2 =', z, SMF_lgMchar, SMF_alpha_1, SMF_phi_1, SMF_alpha_2, SMF_phi_2)
SMF_PhiMstar = Schechter_Function(lgMstar_grid, SMF_phi_1, SMF_lgMchar, SMF_alpha_1) + \
Schechter_Function(lgMstar_grid, SMF_phi_2, SMF_lgMchar, SMF_alpha_2) # two component
lgPhiMstar_grid = np.log10(SMF_PhiMstar)
lgPhiMstar_matrix.append(copy(lgPhiMstar_grid))
#
SMF_z = (tb_SMF['zLo'].data + tb_SMF['zHi'].data) / 2.0
lgPhiMstar_matrix = np.array(lgPhiMstar_matrix) # shape == (N_SMF_z, N_SMF_lgMstar, )
if z <= np.min(SMF_z):
lgPhiMstar_grid = lgPhiMstar_matrix[0]
elif z >= np.max(SMF_z):
lgPhiMstar_grid = lgPhiMstar_matrix[-1]
else:
lgPhiMstar_grid = interp1d(SMF_z, lgPhiMstar_matrix, axis=0, kind='linear')(z)
#print(lgPhiMstar_matrix.shape, SMF_z.shape, lgPhiMstar_grid.shape)
# fix nan
lgPhiMstar_grid[np.isnan(lgPhiMstar_grid)] = -100
lgPhiMstar_grid[(lgPhiMstar_grid<-100)] = -100
#
if lgMstar is None:
return lgMstar_grid, lgPhiMstar_grid
else:
return lgPhiMstar_grid
def calc_SMF_Peng2010(z, lgMstar=None, galaxy_type='SFG'):
#
    # Peng et al. 2010
# IMF: Chabrier 2003
# Outputs: lgMstar_grid, lgPhiMstar_grid
#
# check z
if not np.isscalar(z):
raise ValueError('Please input a float number as the redshift!')
#
# check galaxy_type
if not (type(galaxy_type) is str):
raise ValueError('Please input either "ALL", "SFG" or "QG" as the galaxy_type!')
else:
        if not (galaxy_type in ['ALL', 'SFG', 'QG']):
raise ValueError('Please input either "ALL", "SFG" or "QG" as the galaxy_type!')
#
# make lgMstar
if lgMstar is None:
lgMstar_grid = np.linspace(6.0, 13.0, num=1000, endpoint=True)
else:
lgMstar_grid = lgMstar
#
# galaxy_type
if galaxy_type == 'ALL':
galaxy_types = ['SFG', 'QG']
else:
galaxy_types = [galaxy_type]
#
# read SMF
for t_galaxy_type in galaxy_types:
        tb_SMF = Table.read(os.path.dirname(os.path.dirname(__file__))+os.sep+'Data_Tables/datatables_SMF/datatable_PengYingjie2010_SMF_'+t_galaxy_type+'.txt', format='ascii') # zLo zHi lgMchar Phi_1 alpha_1 Phi_2 alpha_2
SMF_zmin = np.min(tb_SMF['zLo'])
SMF_zmax = np.max(tb_SMF['zHi'])
#
# there is only one redshift bin, but we still check the input z range
if z < SMF_zmin or z > SMF_zmax:
raise ValueError('calc_SMF_Peng2010: The input redshift is out of the allowed range of %s -- %s!'%(SMF_zmin, SMF_zmax))
#
# just calculate SMF without interpolation
SMF_z = (tb_SMF['zLo'].data + tb_SMF['zHi'].data) / 2.0
SMF_phi_1 = tb_SMF['Phi_1'].data
SMF_alpha_1 = tb_SMF['alpha_1'].data
SMF_lgMchar = tb_SMF['lgMchar'].data
SMF_PhiMstar = Schechter_Function(lgMstar_grid, SMF_phi_1, SMF_lgMchar, SMF_alpha_1) # one component
if t_galaxy_type == 'SFG':
SMF_PhiMstar_SFG = copy(SMF_PhiMstar)
elif t_galaxy_type == 'QG':
SMF_phi_2 = tb_SMF['Phi_2'].data
SMF_alpha_2 = tb_SMF['alpha_2'].data
SMF_PhiMstar_QG = SMF_PhiMstar + Schechter_Function(lgMstar_grid, SMF_phi_2, SMF_lgMchar, SMF_alpha_2) # two component QG SMF
#
    if galaxy_type == 'SFG':
        lgPhiMstar_grid = np.log10(SMF_PhiMstar_SFG)
    elif galaxy_type == 'QG':
        lgPhiMstar_grid = np.log10(SMF_PhiMstar_QG)
    elif galaxy_type == 'ALL':
        lgPhiMstar_grid = np.log10(SMF_PhiMstar_SFG + SMF_PhiMstar_QG)
    # fix nan
    lgPhiMstar_grid[np.isnan(lgPhiMstar_grid)] = -100
    lgPhiMstar_grid[(lgPhiMstar_grid<-100)] = -100
    #
    if lgMstar is None:
        return lgMstar_grid, lgPhiMstar_grid
    else:
        return lgPhiMstar_grid
# Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import cunumeric as num
def test():
x = num.array(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
)
assert np.array_equal(x[0, :], [1, 2, 3, 4])
assert | np.array_equal(x[1:2, :], [[5, 6, 7, 8]]) | numpy.array_equal |
import numpy as np
import cv2
import os
import glob
from PIL import Image
import tqdm
from lib.utils import base_utils
num_train_imgs = 80000
max_objects_in_scene = 6
bg_dir = '/mnt/data/home/pengsida/Datasets/SUN2012pascalformat/JPEGImages/*.jpg'
bg_paths = glob.glob(bg_dir)
output_dir = '/mnt/data/home/pengsida/Datasets/tless/tless-mix'
output_rgb_dir = os.path.join(output_dir, 'rgb')
output_mask_dir = os.path.join(output_dir, 'mask')
tless_dir = '/home/pengsida/Datasets/tless/renders'
def cut_and_paste(img, mask, train_img, train_mask, instance_id):
    """Paste the object pixels of `img` selected by `mask` at a random location
    inside `train_img`, and write `instance_id` into `train_mask` at the same
    pixels (simple copy-paste augmentation for synthetic training scenes)."""
    ys, xs = np.nonzero(mask)
y_min, y_max = np.min(ys), np.max(ys)
x_min, x_max = np.min(xs), np.max(xs)
h, w = y_max - y_min, x_max - x_min
img_h, img_w = train_img.shape[0], train_img.shape[1]
dst_y, dst_x = np.random.randint(0, img_h-h), np.random.randint(0, img_w-w)
dst_ys, dst_xs = ys - y_min + dst_y, xs - x_min + dst_x
train_img[dst_ys, dst_xs] = img[ys, xs]
train_mask[dst_ys, dst_xs] = instance_id
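# Usage sketch (synthetic arrays only): paste one square "object" into an empty
# training canvas; real calls use rendered T-LESS images and masks instead.
def _example_cut_and_paste():
    img = np.full((64, 64, 3), 200, dtype=np.uint8)
    mask = np.zeros((64, 64), dtype=np.uint8)
    mask[10:30, 10:30] = 1
    canvas = np.zeros((540, 720, 3), dtype=np.uint8)
    canvas_mask = np.zeros((540, 720), dtype=np.uint8)
    cut_and_paste(img, mask, canvas, canvas_mask, instance_id=1)
    return canvas, canvas_mask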
def fuse():
W, H = 720, 540
noofobjects = 30
if not os.path.exists(output_rgb_dir):
os.makedirs(output_rgb_dir)
if not os.path.exists(output_mask_dir):
os.makedirs(output_mask_dir)
noofimages = {i+1: len(glob.glob(os.path.join(tless_dir, str(i+1), '*.pkl'))) for i in range(noofobjects)}
obj_info = []
for i in tqdm.tqdm(range(num_train_imgs)):
train_img = np.zeros((H, W, 3), dtype=np.uint8)
train_mask = np.zeros((H, W), dtype=np.uint8)
instance_id = 0
ann = []
for k in range(max_objects_in_scene):
obj_id = np.random.randint(0, noofobjects) + 1
img_id = | np.random.randint(0, noofimages[obj_id]) | numpy.random.randint |
import pytest
import numpy as np
import quanguru as qt
import tests.classes.Integration.thd._orDQHS as th
fp2ZExp = qt.readCSV("tests/classes/Integration/thd/thdData/fp2ZExp.txt")
fp2YExp = qt.readCSV("tests/classes/Integration/thd/thdData/fp2YExp.txt")
fp3ZExp = qt.readCSV("tests/classes/Integration/thd/thdData/fp3ZExp.txt")
fp3YExp = qt.readCSV("tests/classes/Integration/thd/thdData/fp3YExp.txt")
qp2ZExp = qt.readCSV("tests/classes/Integration/thd/thdData/qp2ZExp.txt")
qp2YExp = qt.readCSV("tests/classes/Integration/thd/thdData/qp2YExp.txt")
qp3ZExp = qt.readCSV("tests/classes/Integration/thd/thdData/qp3ZExp.txt")
qp3YExp = qt.readCSV("tests/classes/Integration/thd/thdData/qp3YExp.txt")
sfid2Y = qt.readCSV("tests/classes/Integration/thd/thdData/sfid2Y.txt")
sfid2Z = qt.readCSV("tests/classes/Integration/thd/thdData/sfid2Z.txt")
sfid3Y = qt.readCSV("tests/classes/Integration/thd/thdData/sfid3Y.txt")
sfid3Z = qt.readCSV("tests/classes/Integration/thd/thdData/sfid3Z.txt")
@pytest.mark.parametrize("bo", [False, True])
def test_thdFromSaved(bo):
th.simulation.run(p=bo)
assert np.allclose(fp2ZExp[0], th.simulation.results["fp2ZExp"][0])
assert np.allclose(fp2ZExp[1], th.simulation.results["fp2ZExp"][1])
assert np.allclose(fp2YExp[0], th.simulation.results["fp2YExp"][0])
assert np.allclose(fp2YExp[1], th.simulation.results["fp2YExp"][1])
assert np.allclose(fp3ZExp[0], th.simulation.results["fp3ZExp"][0])
assert np.allclose(fp3ZExp[1], th.simulation.results["fp3ZExp"][1])
assert np.allclose(fp3YExp[0], th.simulation.results["fp3YExp"][0])
assert np.allclose(fp3YExp[1], th.simulation.results["fp3YExp"][1])
assert np.allclose(qp2ZExp[0], th.simulation.results["qp2ZExp"][0])
assert | np.allclose(qp2ZExp[1], th.simulation.results["qp2ZExp"][1]) | numpy.allclose |
import time
import pickle
import numpy as np
import tensorflow as tf
def get_vars_underscope(scope, name):
returned_vars= []
for v in tf.global_variables():
if scope+'/'+name in v.op.name:
returned_vars+= [v]
return returned_vars
def timeit(f):
""" Decorator to time Any Function """
def timed(*args, **kwargs):
start_time = time.time()
result = f(*args, **kwargs)
end_time = time.time()
seconds = end_time - start_time
print(" [-] %s : %2.5f sec, which is %2.5f mins, which is %2.5f hours" %
(f.__name__, seconds, seconds / 60, seconds / 3600))
return result
return timed
def _debug(operation):
print("Layer_name: " + operation.op.name + " -Output_Shape: " + str(operation.shape.as_list()))
def output_confusion_matrix(confusion_matrix, file_name, num_classes):
file_output = open(file_name, 'w')
ans = ""
for i in range(num_classes):
ans += '{:>10} '.format(str(i))
file_output.write(ans)
file_output.write('\n')
for i in range(num_classes):
ans = ""
for j in range(num_classes + 1):
if j == 0:
ans += str(i)
else:
ans += '{:>10} '.format(str(confusion_matrix[i][j - 1]))
file_output.write(ans)
file_output.write('\n')
file_output.close()
def load_obj(name):
with open(name, 'rb') as f:
return pickle.load(f)
def save_obj(obj, name):
with open(name, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
@timeit
def get_class_weights(nclasses, npy_file):
"""
This function get the weights of every class from labels to use it in the loss while training
:param nclasses: Number of classes of labels
:param npy_file: the numpy file of the training ex: Y_train.npy
:return: class_weights: which is a numpy array contain the weights of all classes
"""
yy = np.load(npy_file)
label_to_frequency = {}
for c in range(nclasses):
class_mask = | np.equal(yy, c) | numpy.equal |
"""
Created on Mon Nov 05 03:52:36 2018
@author: Paul
"""
### Boiler-Plate ###
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import scipy as sp
from numpy import random
import time
import csv
from Class1_Eq import *
from Func import *
""" Change this value when changed in restart .i files """
global t_final
t_final = 10000 # seconds
global ss_fail_penalty
ss_fail_penalty = 700
global cost_multiplier_for_nucl_safety_grade
cost_multiplier_for_nucl_safety_grade = 5.0
###########################################################################
""""""""" Tri-System Option Class """"""""" ###########################
###########################################################################
class Option:
"""
Inputs:
x1 = Zion core loop x-optimization parameters
x2 = PERCS loop x-optimization parameters
x3 = PCS superstructure x-optimization parameters
y = PCS superstructure y-optimization parameters
Parameters:
*Individual optimization parameters (explained in __init__() function)
Core Loop:
cards = Array of RELAP5 card numbers with core loop value changes
i_vals = Array of column numbers for core loop value changes
vals = Array of new values for core loop value changes
T_fuel_cent_max = Maximum fuel centerline temperature (constraint)
T_clad_surf_max = Maximum cladding surface temperature (constraint)
MDNBR = Minimum departure from nucleate boiling ratio (constraint)
T_f_over_max = [Boolean] Did fuel temperature go over the max?
T_clad_surf_max = [Boolean] Did cladding temperature go over the max?
MDNBR_below_1 = [Boolean] Did MDNBR go below 1.0?
peanlized = [Boolean] Did the core loop receive a penalty?
failed = [Boolean] Did the RELAP5 core loop model fail early?
csvFileLocation = [String] Core's PyPost results file location
*Parameters for T, P, m_dot, H, & x_e core data from PyPost
k_eff = Effective multiplication factor per neutron cycle in core
rho_0 = Initial reactivity of the core
Bc = Cycle burn-up of the fuel [EFPD = effective full-power days]
nBc = Discharge burn-up of the fuel
cost_RCPs = Capital cost of RCPs
op_cost_RCPs = Operating cost of RCPs (40 yrs)
cost_total_fuel = Cost of UO2 fuel (40 yrs)
PERCS Loop:
list_card = Array of RELAP5 card numbers with PERCS value changes
list_i_change = Array of column numbers for PERCS value changes
list_change = Array of new values for PERCS value changes
len_diff_717 = Parameter used to calculate length of Pipe 717
n_tubes = Number of tubes w/in PERCS tank
m_MgCO3 = Mass of Magnesium Carbonate w/in PERCS tank
T_over_620 = [Boolean] Did the core outlet T go above 620K?
T_over_635 = [Boolean] Did the core outlet T go above 635K?
csvFileLocation2 = [String] PERCS's PyPost results file location
*Parameters for T & alpha PERCS data from PyPost
PERCS_failed = [Boolean] Did the PERCS RELAP5 model fail early?
PERCS_penalty = [Boolean] Did the PERCS receive a penalty?
            cost_penalty = Multiplicative cost penalty if 'PERCS_failed' = TRUE
ss_fail = [Boolean] Redundant of Core's 'failed'
p716, p717 = Pipes 716 & 717 (for cost purposes)
support = Support structure for PERCS tank (for cost purposes)
hx = Fake heat exchanger (for cost purposes)
tank = PERCS tank (for cost purposes)
chemical = MgCO3 in tank (for cost purposes)
PCS Loop:
pinch_point = [Boolean]
s = Array of Stream instances for all 37 PCS superstructure streams
phx = PHX instance representing the Steam Generator
t1a, t1b, t1c, t2a, t2b = Turbines representing the diff. stages
t1, t2 = Actual turbines (for cost purposes)
t3, t4, t5 = Turbine instances for LPTs
ms1, ms2 = Moisture separator instances
rh1, rh2 = Reheater heat exchanger instances
cond = Condenser instance
fwh1, fwh2, fwh3, fwh4 = Feedwater heater instances
p1, p2, p3, p4, p5, p6 = Pump instances
Objective Functions:
W_rcp = Core Obj. 1 - Total work of RCPs
cost_1 = Core Obj. 2 - Total core loop costs
obj_1_1 = Normalized W_rcp
obj_1_2 = Normalized cost_1
fmm_1 = Maximin fitness value for core loop
cost_2 = PERCS Obj. 1 - Total PERCS equipment cost
dT_int = PERCS Obj. 2 - Integral of deviation of core outlet T
alpha = PERCS Obj. 3 - Consumption of MgCO3
obj_2_1 = Normalized cost_2
obj_2_2 = Normalized dT_int
obj_2_3 = Normalized alpha
fmm_2 = Maximin fitness value for PERCS loop
color = [String] PCS superstructure color/configuration
eff = PCS Obj. 1 - Thermodynamic Efficiency
cost_3 = PCS Obj. 2 - Total PCS equipment cost
obj_3_1 = Normalized eff
obj_3_2 = Normalized cost_3
fmm_3 = Maximin fitness value for PCS loop
obj_fmm_1 = Normalized fmm_1
obj_fmm_2 = Normalized fmm_2
obj_fmm_3 = Normalized fmm_3
fmm_o = Overall Maximin fitness value
Functions:
init_ZION_calcs() - Fills arrays to make core loop RELAP5 value changes
init_PERCS_calcs() - Fills arrays to make PERCS RELAP5 value changes
final_ZION_calcs() - Grabs PyPost data, Performs final core loop calcs
final_PERCS_calcs() - Grabs PyPost data, Performs final PERCS calcs
Alpha_calcs() - Grabs alpha PyPost data, Calcs overall Alpha
PCS_SS_calcs() - Calls solve_PCS(), Performs final PCS calcs
solve_PCS() - Fills out PCS superstructure & converges the cycle
"""
def __init__(self,x1_in,x2_in,x3_in,y_in):
self.opt_ID = 0
self.last_sec_penalty = False
# Define the x- and y-optimization parameter arrays
self.x1 = x1_in # ZION x-opt parameters
self.x2 = x2_in # PERCS x-opt parameters
self.x3 = x3_in # PCS x-opt parameters
self.y = y_in # PCS y-opt parameters
# Further define the ZION Core loop opt. parameters
self.R_f = self.x1[0] # ft (radius of fuel per pin)
self.H_fuel = self.x1[1] # ft (height of fuel pins)
self.Dh_00 = self.x1[2] # ft (hydraulic D of pipes _00)
self.Dh_12 = self.x1[3] # ft (hydraulic D of pipes _12)
self.Dh_14 = self.x1[4] # ft (hydraulic D of pipes _14)
# Further define the PERCS loop opt. parameters
self.R_tank = self.x2[0] # ft (radius of PERCS HX tank)
self.pitch = self.x2[1] # ft (pitch b/t tubes in PERCS)
self.D_h = self.x2[2] # ft (hydraulic D of tubes)
self.th = self.x2[3] # ft (thickness of tubes)
self.Len = self.x2[4] # ft (length of tubes / height of tank)
self.elev = self.x2[5] # ft (height diff. b/t core outlet & PERCS inlet)
# Further define the PCS superstructure x-opt. parameters
self.To_PHX = self.x3[0] # degC
self.Po_t1a = self.x3[1] # bar
self.mf_t1a = self.x3[2]
self.Po_t1b = self.x3[3] # bar
self.mf_t1b = self.x3[4]
self.Po_t1c = self.x3[5] # bar
self.Po_t2a = self.x3[6] # bar
self.mf_t2a = self.x3[7]
self.Po_t2b = self.x3[8] # bar
# Further define the PCS superstructure y-opt. parameters
self.y_ipt = self.y[0] # IPT
self.y_rh1 = self.y[1] # RH 1
self.y_rh2 = self.y[2] # RH 2
self.y_s14 = self.y[3] # s[14]
self.y_s4 = self.y[4] # s[4]
self.y_s5 = self.y[5] # s[5]
################################
""" Init stuff for ZION Core """
################################
# Initialize card, i_change, and change lists for ZION
self.cards = np.empty(119,dtype='<U32')
self.i_vals = np.zeros(119,dtype=int)
self.vals = np.zeros(119)
# Initiate the Booleans that tracks thermal design limit violations
self.T_fuel_cent_max = 2100 # degC
self.T_clad_surf_max = 348 # degC
self.MDNBR = 0
self.T_f_over_max = False
self.T_c_over_max = False
self.MDNBR_below_1 = False
self.penalized = False
self.failed = False
# Parameter data grabbed from .csv files using PyPost
self.csvFileLocation = 'None'
self.T_106 = 0.0 # degC
self.T_110 = 0.0 # degC
self.P_106 = 0.0 # bar
self.P_110 = 0.0 # bar
self.P_335 = np.zeros(6) # MPa
self.P_p_out = 0.0 # bar
self.m_dot_100 = 0.0 # kg/s
self.m_dot_335 = 0.0 # kg/s
self.m_dot_400 = 0.0 # kg/s
self.m_dot_600 = 0.0 # kg/s
self.m_dot_200 = 0.0 # kg/s
self.H_106 = 0.0 # kJ/kg
self.H_110 = 0.0 # kJ/kg
self.H_335_1 = 0.0 # kJ/kg
self.H_112_5 = 0.0 # kJ/kg
self.H_114 = 0.0 # kJ/kg
self.H_412_5 = 0.0 # kJ/kg
self.H_414 = 0.0 # kJ/kg
self.H_612_5 = 0.0 # kJ/kg
self.H_614 = 0.0 # kJ/kg
self.H_212_5 = 0.0 # kJ/kg
self.H_214 = 0.0 # kJ/kg
self.T_1336_1 = | np.zeros(6) | numpy.zeros |
import os
import sys
import tempfile
import unittest
import mock
import numpy
from chainer import cuda
from chainer import link
from chainer import links
from chainer import optimizers
from chainer.serializers import hdf5
from chainer import testing
from chainer.testing import attr
if hdf5._available:
import h5py
@unittest.skipUnless(hdf5._available, 'h5py is not available')
class TestHDF5Serializer(unittest.TestCase):
def setUp(self):
fd, path = tempfile.mkstemp()
os.close(fd)
self.temp_file_path = path
self.hdf5file = h5py.File(path, 'w')
self.serializer = hdf5.HDF5Serializer(self.hdf5file, compression=3)
self.data = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
def tearDown(self):
if hasattr(self, 'hdf5file'):
self.hdf5file.close()
if hasattr(self, 'temp_file_path'):
os.remove(self.temp_file_path)
def test_get_item(self):
child = self.serializer['x']
self.assertIsInstance(child, hdf5.HDF5Serializer)
self.assertEqual(child.group.name, '/x')
self.assertEqual(child.compression, 3)
def check_serialize(self, data):
ret = self.serializer('w', data)
dset = self.hdf5file['w']
self.assertIsInstance(dset, h5py.Dataset)
self.assertEqual(dset.shape, data.shape)
self.assertEqual(dset.size, data.size)
self.assertEqual(dset.dtype, data.dtype)
read = numpy.empty((2, 3), dtype=numpy.float32)
dset.read_direct(read)
numpy.testing.assert_array_equal(read, cuda.to_cpu(data))
self.assertEqual(dset.compression_opts, 3)
self.assertIs(ret, data)
def test_serialize_cpu(self):
self.check_serialize(self.data)
@attr.gpu
def test_serialize_gpu(self):
self.check_serialize(cuda.to_gpu(self.data))
def test_serialize_scalar(self):
ret = self.serializer('x', 10)
dset = self.hdf5file['x']
self.assertIsInstance(dset, h5py.Dataset)
self.assertEqual(dset.shape, ())
self.assertEqual(dset.size, 1)
self.assertEqual(dset.dtype, int)
read = numpy.empty((), dtype=numpy.int32)
dset.read_direct(read)
self.assertEqual(read, 10)
self.assertEqual(dset.compression_opts, None)
self.assertIs(ret, 10)
@unittest.skipUnless(hdf5._available, 'h5py is not available')
class TestHDF5Deserializer(unittest.TestCase):
def setUp(self):
self.data = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
fd, path = tempfile.mkstemp()
os.close(fd)
self.temp_file_path = path
with h5py.File(path, 'w') as f:
f.require_group('x')
f.create_dataset('y', data=self.data)
f.create_dataset('z', data=numpy.asarray(10))
self.hdf5file = h5py.File(path, 'r')
self.deserializer = hdf5.HDF5Deserializer(self.hdf5file)
def tearDown(self):
if hasattr(self, 'hdf5file'):
self.hdf5file.close()
if hasattr(self, 'temp_file_path'):
os.remove(self.temp_file_path)
def test_get_item(self):
child = self.deserializer['x']
self.assertIsInstance(child, hdf5.HDF5Deserializer)
self.assertEqual(child.group.name, '/x')
def check_deserialize(self, y):
ret = self.deserializer('y', y)
numpy.testing.assert_array_equal(cuda.to_cpu(y), self.data)
self.assertIs(ret, y)
def check_deserialize_none_value(self, y):
ret = self.deserializer('y', None)
numpy.testing.assert_array_equal(cuda.to_cpu(ret), self.data)
def test_deserialize_cpu(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize(y)
def test_deserialize_none_value_cpu(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize_none_value(y)
@attr.gpu
def test_deserialize_gpu(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize(cuda.to_gpu(y))
@attr.gpu
def test_deserialize_none_value_gpu(self):
y = | numpy.empty((2, 3), dtype=numpy.float32) | numpy.empty |
#!/usr/bin/python
##############################################################################
# Copyright 2016-2017 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import math
import warnings
from functools import reduce
from itertools import product
from operator import mul
import numpy as np
import pytest
from pyquil.gates import RX, RZ, CNOT, H, X, PHASE
from pyquil.paulis import (
PauliTerm,
PauliSum,
exponential_map,
exponentiate_commuting_pauli_sum,
ID,
exponentiate,
trotterize,
is_zero,
check_commutation,
commuting_sets,
term_with_coeff,
sI,
sX,
sY,
sZ,
ZERO,
is_identity,
)
from pyquil.quil import Program
from pyquil.simulation.tools import program_unitary
def isclose(a, b, rel_tol=1e-10, abs_tol=0.0):
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def test_simplify_terms():
term = PauliTerm("Z", 0) * -1.0 * PauliTerm("Z", 0)
assert term.id() == ""
assert term.coefficient == -1.0
term = PauliTerm("Z", 0) + PauliTerm("Z", 0, 1.0)
assert str(term) == "(2+0j)*Z0"
def test_get_qubits():
term = PauliTerm("Z", 0) * PauliTerm("X", 1)
assert term.get_qubits() == [0, 1]
sum_term = PauliTerm("X", 0, 0.5) + 0.5j * PauliTerm("Y", 10) * PauliTerm("Y", 0, 0.5j)
assert sum_term.get_qubits() == [0, 10]
def test_simplify_term_id_1():
term = PauliTerm("I", 0, 0.5)
assert term.id() == ""
assert term.coefficient == 0.5
def test_simplify_term_id_2():
term = 0.5 * ID()
assert term.id() == ""
assert term.coefficient == 0.5
def test_simplify_term_id_3():
s = 0.25 + 0.25 * ID()
terms = s.terms
assert len(terms) == 1
assert terms[0].id() == ""
assert terms[0].coefficient == 0.5
def test_simplify_term_single():
term = PauliTerm("Z", 0) * PauliTerm("I", 1) * PauliTerm("X", 2, 0.5j) * PauliTerm("Z", 0, 1.0)
assert term.id() == "X2"
assert term.coefficient == 0.5j
def test_simplify_term_xz():
term1 = (-0.5 * PauliTerm("X", 0)) * (-1.0 * PauliTerm("Z", 0))
term2 = -0.5 * PauliTerm("X", 0) * (-1.0) * PauliTerm("Z", 0)
term3 = 0.5 * PauliTerm("X", 0) * PauliTerm("Z", 0)
for term in [term1, term2, term3]:
assert term.id() == "Y0"
assert term.coefficient == -0.5j
def test_simplify_term_multindex():
term = PauliTerm("X", 0, coefficient=-0.5) * PauliTerm("Z", 0, coefficient=-1.0) * PauliTerm("X", 2, 0.5)
assert term.id(sort_ops=False) == "Y0X2"
assert term.coefficient == -0.25j
def test_simplify_sum_terms():
sum_term = PauliSum([PauliTerm("X", 0, 0.5), PauliTerm("Z", 0, 0.5j)])
str_sum_term = str(sum_term + sum_term)
assert str_sum_term == "(1+0j)*X0 + 1j*Z0" or str_sum_term == "1j*Z0 + (1+0j)*X0"
sum_term = PauliSum([PauliTerm("X", 0, 0.5), PauliTerm("X", 0, 0.5)])
assert str(sum_term.simplify()) == "(1+0j)*X0"
# test the simplify on multiplication
sum_term = PauliSum([PauliTerm("X", 0, 0.5), PauliTerm("X", 0, 0.5)])
assert str(sum_term * sum_term) == "(1+0j)*I"
def test_copy():
term = PauliTerm("X", 0, 0.5) * PauliTerm("X", 1, 0.5)
new_term = term.copy()
term = term * PauliTerm("X", 2, 0.5)
new_term = new_term * PauliTerm("X", 2, 0.5)
assert term == new_term # value equality
assert term is not new_term # ref inequality
assert term._ops is not new_term._ops
term = PauliTerm("X", 0, 0.5) * PauliTerm("X", 1, 0.5)
new_term = term * PauliTerm("X", 2, 0.5)
assert term != new_term
assert term is not new_term
assert term._ops is not new_term._ops
def test_len():
term = PauliTerm("Z", 0, 1.0) * PauliTerm("Z", 1, 1.0)
assert len(term) == 2
def test_sum_len():
pauli_sum = PauliTerm("Z", 0, 1.0) + PauliTerm("Z", 1, 1.0)
assert len(pauli_sum) == 2
def test_enumerate():
term = PauliTerm("Z", 0, 1.0) * PauliTerm("Z", 1, 1.0) * PauliTerm("X", 5, 5)
position_op_pairs = [(0, "Z"), (1, "Z"), (5, "X")]
for key, val in term:
assert (key, val) in position_op_pairs
def test_getitem():
term = PauliTerm("Z", 0, 1.0) * PauliTerm("Z", 1, 1.0) * PauliTerm("X", 5, 5)
assert term[0] == "Z"
assert term[1] == "Z"
assert term[2] == "I"
assert term[3] == "I"
assert term[4] == "I"
assert term[5] == "X"
assert len(term) == 3
def test_ids():
term_1 = PauliTerm("Z", 0, 1.0) * PauliTerm("Z", 1, 1.0) * PauliTerm("X", 5, 5)
term_2 = PauliTerm("X", 5, 5) * PauliTerm("Z", 0, 1.0) * PauliTerm("Z", 1, 1.0)
with pytest.warns(FutureWarning) as w:
assert term_1.id() == term_2.id()
assert "should be avoided" in str(w[0])
def test_ids_no_sort():
term_1 = PauliTerm("Z", 0, 1.0) * PauliTerm("Z", 1, 1.0) * PauliTerm("X", 5, 5)
term_2 = PauliTerm("X", 5, 5) * PauliTerm("Z", 0, 1.0) * PauliTerm("Z", 1, 1.0)
assert term_1.id(sort_ops=False) == "Z0Z1X5"
assert term_2.id(sort_ops=False) == "X5Z0Z1"
def test_operations_as_set():
term_1 = PauliTerm("Z", 0, 1.0) * PauliTerm("Z", 1, 1.0) * PauliTerm("X", 5, 5)
term_2 = PauliTerm("X", 5, 5) * PauliTerm("Z", 0, 1.0) * PauliTerm("Z", 1, 1.0)
assert term_1.operations_as_set() == term_2.operations_as_set()
def test_pauliop_inputs():
with pytest.raises(ValueError):
PauliTerm("X", -2)
def test_pauli_sum():
q_plus = 0.5 * PauliTerm("X", 0) + 0.5j * PauliTerm("Y", 0)
the_sum = q_plus * PauliSum([PauliTerm("X", 0)])
term_strings = [str(x) for x in the_sum.terms]
assert "(0.5+0j)*I" in term_strings
assert "(0.5+0j)*Z0" in term_strings
assert len(term_strings) == 2
assert len(the_sum.terms) == 2
the_sum = q_plus * PauliTerm("X", 0)
term_strings = [str(x) for x in the_sum.terms]
assert "(0.5+0j)*I" in term_strings
assert "(0.5+0j)*Z0" in term_strings
assert len(term_strings) == 2
assert len(the_sum.terms) == 2
the_sum = PauliTerm("X", 0) * q_plus
term_strings = [str(x) for x in the_sum.terms]
assert "(0.5+0j)*I" in term_strings
assert "(-0.5+0j)*Z0" in term_strings
assert len(term_strings) == 2
assert len(the_sum.terms) == 2
with pytest.raises(ValueError):
_ = PauliSum(sI(0))
with pytest.raises(ValueError):
_ = PauliSum([1, 1, 1, 1])
with pytest.raises(ValueError):
_ = the_sum * []
def test_ps_adds_pt_1():
term = ID()
b = term + term
assert str(b) == "(2+0j)*I"
assert str(b + term) == "(3+0j)*I"
assert str(term + b) == "(3+0j)*I"
def test_ps_adds_pt_2():
term = ID()
b = term + 1.0
assert str(b) == "(2+0j)*I"
assert str(b + 1.0) == "(3+0j)*I"
assert str(1.0 + b) == "(3+0j)*I"
b = sX(0) + 1.0
assert str(b) == "(1+0j)*X0 + (1+0j)*I"
b = 1.0 + sX(0)
assert str(b) == "(1+0j)*I + (1+0j)*X0"
def test_pauliterm_sub():
assert str(sX(1) - 2.0) == str(sX(1) + -2.0)
assert str(1.4 - sZ(1)) == str(1.4 + -1.0 * sZ(1))
def test_ps_sub():
term = 3 * ID()
b = term - 1.0
assert str(b) == "(2+0j)*I"
assert str(b - 1.0) == "(1+0j)*I"
assert str(1.0 - b) == "(-1+0j)*I"
b = 1.0 - sX(0)
assert str(b) == "(1+0j)*I + (-1+0j)*X0"
b = sX(0) - 1.0
assert str(b) == "(1+0j)*X0 + (-1+0j)*I"
def test_zero_terms():
term = PauliTerm("X", 0, 1.0) + PauliTerm("X", 0, -1.0) + PauliTerm("Y", 0, 0.5)
assert str(term) == "(0.5+0j)*Y0"
term = PauliTerm("X", 0, 1.0) + PauliTerm("X", 0, -1.0)
assert str(term) == "0j*I"
assert len(term.terms) == 1
term2 = term * PauliTerm("Z", 2, 0.5)
assert str(term2) == "0j*I"
term3 = PauliTerm("Z", 2, 0.5) + term
assert str(term3) == "(0.5+0j)*Z2"
term4 = PauliSum([])
assert str(term4) == "0j*I"
term = PauliSum([PauliTerm("X", 0, 0.0), PauliTerm("Y", 1, 1.0) * PauliTerm("Z", 2)])
assert str(term) == "0j*X0 + (1+0j)*Y1*Z2"
term = term.simplify()
assert str(term) == "(1+0j)*Y1*Z2"
def test_exponentiate_1():
# test rotation of single qubit
generator = PauliTerm("Z", 0, 1.0)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program().inst(RZ(2.0, 0))
assert prog == result_prog
def test_exponentiate_2():
# testing general 2-circuit
generator = PauliTerm("Z", 0, 1.0) * PauliTerm("Z", 1, 1.0)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program().inst(CNOT(0, 1)).inst(RZ(2.0, 1)).inst(CNOT(0, 1))
assert prog == result_prog
def test_exponentiate_bp0_ZX():
# testing change of basis position 0
generator = PauliTerm("X", 0, 1.0) * PauliTerm("Z", 1, 1.0)
param_prog = exponential_map(generator)
prog = param_prog(1)
result_prog = Program().inst([H(0), CNOT(0, 1), RZ(2.0, 1), CNOT(0, 1), H(0)])
assert prog == result_prog
def test_exponentiate_bp1_XZ():
# testing change of basis position 1
generator = PauliTerm("Z", 0, 1.0) * PauliTerm("X", 1, 1.0)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program().inst([H(1), CNOT(0, 1), RZ(2.0, 1), CNOT(0, 1), H(1)])
assert prog == result_prog
def test_exponentiate_bp0_ZY():
# testing change of basis position 0
generator = PauliTerm("Y", 0, 1.0) * PauliTerm("Z", 1, 1.0)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program().inst([RX(math.pi / 2.0, 0), CNOT(0, 1), RZ(2.0, qubit=1), CNOT(0, 1), RX(-math.pi / 2, 0)])
assert prog == result_prog
def test_exponentiate_bp1_YZ():
# testing change of basis position 1
generator = PauliTerm("Z", 0, 1.0) * PauliTerm("Y", 1, 1.0)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program().inst([RX(math.pi / 2.0, 1), CNOT(0, 1), RZ(2.0, 1), CNOT(0, 1), RX(-math.pi / 2.0, 1)])
assert prog == result_prog
def test_exponentiate_3cob():
# testing circuit for 3-terms with change of basis
generator = PauliTerm("Z", 0, 1.0) * PauliTerm("Y", 1, 1.0) * PauliTerm("X", 2, 1.0)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program().inst(
[
RX(math.pi / 2.0, 1),
H(2),
CNOT(0, 1),
CNOT(1, 2),
RZ(2.0, 2),
CNOT(1, 2),
CNOT(0, 1),
RX(-math.pi / 2.0, 1),
H(2),
]
)
assert prog == result_prog
def test_exponentiate_3ns():
# testing circuit for 3-terms non-sequential
generator = PauliTerm("Y", 0, 1.0) * PauliTerm("I", 1, 1.0) * PauliTerm("Y", 2, 1.0) * PauliTerm("Y", 3, 1.0)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program().inst(
[
RX(math.pi / 2.0, 0),
RX(math.pi / 2.0, 2),
RX(math.pi / 2.0, 3),
CNOT(0, 2),
CNOT(2, 3),
RZ(2.0, 3),
CNOT(2, 3),
CNOT(0, 2),
RX(-math.pi / 2.0, 0),
RX(-math.pi / 2.0, 2),
RX(-math.pi / 2.0, 3),
]
)
assert prog == result_prog
def test_exponentiate_commuting_pauli_sum():
pauli_sum = PauliSum([PauliTerm("Z", 0, 0.5), PauliTerm("Z", 1, 0.5)])
prog = Program().inst(RZ(1.0, 0)).inst(RZ(1.0, 1))
result_prog = exponentiate_commuting_pauli_sum(pauli_sum)(1.0)
assert prog == result_prog
def test_exponentiate_prog():
ham = PauliTerm("Z", 0)
result_prog = Program(RZ(2.0, 0))
prog = exponentiate(ham)
assert prog == result_prog
def test_exponentiate_identity():
generator = PauliTerm("I", 1, 0.0)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program()
assert prog == result_prog
generator = PauliTerm("I", 1, 1.0)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program().inst([X(0), PHASE(-1.0, 0), X(0), PHASE(-1.0, 0)])
assert prog == result_prog
generator = PauliTerm("I", 10, 0.08)
para_prog = exponential_map(generator)
prog = para_prog(1)
result_prog = Program().inst([X(0), PHASE(-0.08, 0), X(0), PHASE(-0.08, 0)])
assert prog == result_prog
def test_trotterize():
term_one = PauliTerm("X", 0, 1.0)
term_two = PauliTerm("Z", 0, 1.0)
with pytest.raises(ValueError):
trotterize(term_one, term_two, trotter_order=0)
with pytest.raises(ValueError):
trotterize(term_one, term_two, trotter_order=5)
prog = trotterize(term_one, term_one)
result_prog = Program().inst([H(0), RZ(2.0, 0), H(0), H(0), RZ(2.0, 0), H(0)])
assert prog == result_prog
# trotter_order 1 steps 1
prog = trotterize(term_one, term_two, trotter_steps=1)
result_prog = Program().inst([H(0), RZ(2.0, 0), H(0), RZ(2.0, 0)])
assert prog == result_prog
# trotter_order 1 steps 2
prog = trotterize(term_one, term_two, trotter_steps=2)
result_prog = Program().inst([H(0), RZ(1.0, 0), H(0), RZ(1.0, 0), H(0), RZ(1.0, 0), H(0), RZ(1.0, 0)])
assert prog == result_prog
# trotter_order 2 steps 1
prog = trotterize(term_one, term_two, trotter_order=2)
result_prog = Program().inst([H(0), RZ(1.0, 0), H(0), RZ(2.0, 0), H(0), RZ(1.0, 0), H(0)])
assert prog == result_prog
# trotter_order 2 steps 2
prog = trotterize(term_one, term_two, trotter_order=2, trotter_steps=2)
result_prog = Program().inst(
[
H(0),
RZ(0.5, 0),
H(0),
RZ(1.0, 0),
H(0),
RZ(0.5, 0),
H(0),
H(0),
RZ(0.5, 0),
H(0),
RZ(1.0, 0),
H(0),
RZ(0.5, 0),
H(0),
]
)
assert prog == result_prog
# trotter_order 3 steps 1
prog = trotterize(term_one, term_two, trotter_order=3, trotter_steps=1)
result_prog = Program().inst(
[
H(0),
RZ(14.0 / 24, 0),
H(0),
RZ(4.0 / 3.0, 0),
H(0),
RZ(1.5, 0),
H(0),
RZ(-4.0 / 3.0, 0),
H(0),
RZ(-2.0 / 24, 0),
H(0),
RZ(2.0, 0),
]
)
assert prog == result_prog
def test_trotterize_order():
def expmi(hermitian_matrix):
"""Compute the matrix exponential of -1j * hermitian_matrix."""
L, Q = | np.linalg.eigh(hermitian_matrix) | numpy.linalg.eigh |
"""
Created by <NAME> on January 14, 2020
This is an implementation of the paper IMITATION LEARNING FROM VISUAL DATA WITH MULTIPLE INTENTIONS
Tamar et. al.
"""
import torch
import torch.nn as nn
# from torch.distributions import RelaxedOneHotCategorical
import numpy as np
from torch.autograd import Variable
# from utils.global_utils import save_pickle
# import matplotlib.pyplot as plt
# import heapq
import os
import pickle
# noinspection PyUnresolvedReferences
torch.backends.cudnn.deterministic = True
# noinspection PyUnresolvedReferences
torch.backends.cudnn.benchmark = False
torch.manual_seed(0)
np.random.seed(0)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
epochs = 100000
class SNN(nn.Module):
"""
standard MLP
"""
def __init__(self, state_size):
super(SNN, self).__init__()
self.fc1 = nn.Linear(state_size + 1, 128)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(128, 32)
self.relu2 = nn.ReLU()
self.fc21 = nn.Linear(32, 32)
self.relu21 = nn.ReLU()
self.fc22 = nn.Linear(32, 32)
self.relu22 = nn.ReLU()
self.fc23 = nn.Linear(32, 32)
self.relu23 = nn.ReLU()
self.fc3 = nn.Linear(32, 3)
def forward(self, x, z):
"""
forward pass
:param x: state
:param z: random variable
:return:
"""
x = torch.cat([x, z.expand(x.shape[0], 1, 1)], dim=2)
x = self.fc1(x)
x = self.relu1(x)
x = self.fc2(x)
x = self.relu2(x)
x = self.fc21(x)
x = self.relu21(x)
x = self.fc22(x)
x = self.relu22(x)
x = self.fc23(x)
x = self.relu23(x)
x = self.fc3(x)
return x
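# Forward-pass sketch (illustrative shapes only): feed a short state trajectory
# through the network together with one scalar sample of the latent variable z.
def _example_snn_forward():
    model = SNN(state_size=5)
    x = torch.zeros(10, 1, 5)          # (timesteps, batch=1, state_dim)
    z = torch.tensor(0.37)             # one sampled intention variable in [0, 1)
    logits = model(x, z)               # -> (10, 1, 3) action logits
    return logits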
# noinspection PyArgumentList
class EvalSNN:
"""
class that handles training and evaluating this approach
"""
def __init__(self):
self.state_dim = 5
self.output_dim = 3
self.model = SNN(self.state_dim)
self.criterion = nn.CrossEntropyLoss()
self.opt = torch.optim.SGD(self.model.parameters(), lr=.001)
# load in data
self.states, self.actions, self.failed_list, self.mturkcodes, self.indices_of_failed = self.load_in_data()
self.test_states, self.test_actions, self.test_failed_list, self.test_mturkcodes, self.test_indices_of_failed = self.load_in_test_data()
self.priority_queue = []
self.use_gpu = True
self.testing_accuracies = []
self.testing_stds = []
print(self.model.state_dict())
# checkpoint = torch.load('/home/Anonymous/PycharmProjects/bayesian_prolo/saved_models/HRI/SNN999999.tar')
# self.model.load_state_dict(checkpoint['nn_state_dict'])
@staticmethod
def load_in_data():
"""
loads in train data
:return:
"""
states, actions, failed_list, mturkcodes = pickle.load(open(os.path.join('../datasets/', 'testing_data_from_all_users_2.pkl'), 'rb'))
indices_of_failed = []
for i in failed_list:
if i[0] not in indices_of_failed:
indices_of_failed.append(i[0])
return states, actions, failed_list, mturkcodes, indices_of_failed
@staticmethod
def load_in_test_data():
"""
loads in test data
:return:
"""
states, actions, failed_list, mturkcodes = pickle.load(open(os.path.join('../datasets/', 'training_data_from_all_users_2.pkl'), 'rb'))
indices_of_failed = []
for i in failed_list:
if i[0] not in indices_of_failed:
indices_of_failed.append(i[0])
return states, actions, failed_list, mturkcodes, indices_of_failed
def find_max_traj_length(self):
max_length = 0
for i in self.states:
game_length = len(i)
if game_length > max_length:
max_length = game_length
return max_length
def train(self):
"""
Train the network
:return:
"""
loss_array = []
sampling_N = 5
length_of_longest_game = self.find_max_traj_length()
for epoch in range(epochs):
print('epoch: ', epoch)
# sample a timestep before the cutoff for cross_validation
which_user = np.random.choice(range(len(self.states)))
if which_user in self.indices_of_failed:
continue
states = self.states[which_user]
actions = self.actions[which_user]
length_of_current_game = len(states)
# sample z
sampled_z = np.random.rand(sampling_N)
# prepare network input
network_input = torch.zeros(length_of_current_game, 1, 5)
network_truth = torch.zeros(length_of_current_game, 1, 1)
z_loss_array = []
self.opt.zero_grad()
# load in network input
for e, i in enumerate(states):
state_t = states[e]
action_t = actions[e]
# iterate over pairwise comparisons
if self.use_gpu:
network_in = torch.tensor(state_t).reshape(1, 5).cuda()
action_t = torch.tensor(action_t).reshape(1).cuda()
else:
network_in = torch.tensor(state_t)
action_t = torch.tensor(action_t).reshape(1)
network_input[e] = network_in
truth = action_t
network_truth[e] = truth
# find chosen_z
for z in sampled_z:
network_output = self.model.forward(network_input.float(), torch.tensor(z))
loss = self.criterion(network_output.reshape(length_of_current_game, 3), network_truth.reshape(length_of_current_game).long())
z_loss_array.append(loss.item())
self.priority_queue.append((loss.item(), z))
# use lowest z to update network
lowest_loss_ind = | np.argmin(z_loss_array) | numpy.argmin |
# Numpy
import numpy as np
# Scipy
from scipy.stats import linregress
# Matplotlib
import matplotlib.pyplot as plt
def plot_spectrum(spectrum, freqs=None, drop_zero_frequency=True, ax=None,
xlog=False, ylog=False, loglog=False,
variance_preserving=False, xlim=None,
ylim=None, title=None, **kwargs):
"""Define a nice spectrum with twin x-axis, one with frequencies, the
other one with periods.
Parameters
----------
spectrum : 1d xarray.DataArray or 1darray
The array where the spectrum is stored
freqs: 1d vector, optional
The frequency vector. If None, the frequency vector is inferred
from the DataArray
drop_zero_frequency : bool, optional
If True, do not plot the zero frequency
ax : matplotlib axes, optional
If None, uses the current axis.
xlog : bool, optional
If True, use log scaling for the x axis
ylog : bool, optional
If True, use log scaling for the y axis
loglog : bool, optional
If True, use log scaling for both axis
variance_preserving : bool, optional
If True, scale the spectrum by the log of frequencies to use the
variance preserving form
xlim : tuple, optional
Set x-axis limits
ylim : tuple, optional
Set y-axis limits
title : string, optional
Set the title
**kwargs : optional
Additional arguments to matplotlib.pyplot.plot
"""
if ax is None:
ax = plt.gca()
if freqs is None:
freqs = spectrum[spectrum.dims[0]]
if drop_zero_frequency:
spectrum = spectrum.where(freqs != 0.)
freqs = freqs.where(freqs != 0.)
#import pytest
#pytest.set_trace()
if variance_preserving:
spectrum = freqs * spectrum
xlog = True
ax.plot(freqs, spectrum, **kwargs)
if xlog or loglog:
ax.set_xscale('log', nonposx='clip')
try:
xmin = np.ceil(np.log10(np.abs(xlim[0]))) - 1
xmax = np.ceil(np.log10(np.abs(xlim[1])))
ax.set_xlim((10 ** xmin, 10 ** xmax))
except TypeError:
try:
xmin = np.ceil(np.log10(abs(freqs[1]))) - 1
xmax = np.ceil(np.log10(abs(freqs[-1])))
ax.set_xlim((10 ** xmin, 10 ** xmax))
except TypeError:
pass
else:
ax.set_xlim(xlim)
if ylog or loglog:
ax.set_yscale('log', nonposy='clip')
try:
ymin = np.ceil(np.log10(np.abs(ylim[0]))) - 1
ymax = np.ceil(np.log10(np.abs(ylim[1])))
ax.set_ylim((10 ** ymin, 10 ** ymax))
except TypeError:
try:
ymin = np.ceil(np.log10(spectrum.min())) - 1
ymax = np.ceil(np.log10(spectrum.max()))
ax.set_ylim((10 ** ymin, 10 ** ymax))
except TypeError:
pass
else:
ax.set_ylim(ylim)
twiny = ax.twiny()
if xlog or loglog:
twiny.set_xscale('log', nonposx='clip')
twiny.set_xlim((10 ** xmin, 10 ** xmax))
new_major_ticks = 10 ** np.arange(xmin + 1, xmax, 1)
new_major_ticklabels = 1. / new_major_ticks
new_major_ticklabels = ["%.3g" % i for i in new_major_ticklabels]
twiny.set_xticks(new_major_ticks)
twiny.set_xticklabels(new_major_ticklabels, rotation=60, fontsize=12)
A = np.arange(2, 10, 2)[np.newaxis]
B = 10 ** (np.arange(-xmax, -xmin, 1)[np.newaxis])
C = np.dot(B.transpose(), A)
new_minor_ticklabels = C.flatten()
new_minor_ticks = 1. / new_minor_ticklabels
new_minor_ticklabels = ["%.3g" % i for i in new_minor_ticklabels]
twiny.set_xticks(new_minor_ticks, minor=True)
twiny.set_xticklabels(new_minor_ticklabels, minor=True, rotation=60,
fontsize=12)
ax.grid(True, which='both')
def plot_power_law(power, scale_factor=1., ax=None, **kwargs):
"""Plot a logarithmic power law
Parameters
----------
power : float
The exponent of the power law
scale_factor : float, optional
The factor to scale the power law with
ax : matplotlib axes, optional
If None, uses the current axis.
**kwargs : optional
Additional arguments to matplotlib.pyplot.plot
Returns
-------
lines : Line2D
Return a Line2D object created by the matplotlib.axes.Axes.plot method
"""
if ax is None:
ax = plt.gca()
xlim = np.array(ax.get_xlim())
power_law = scale_factor * xlim ** power
return ax.plot(xlim, power_law, **kwargs)
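# Illustrative usage sketch (demo values below are arbitrary): plot a synthetic
# red-noise spectrum with matplotlib and overlay an f^-2 power law using
# plot_power_law defined above.
def _demo_plot_power_law():
    freqs_demo = np.linspace(1e-3, 1.0, 500)
    spectrum_demo = 1e-4 * freqs_demo ** -2 * (1.0 + 0.3 * np.random.rand(freqs_demo.size))
    fig, ax = plt.subplots()
    ax.loglog(freqs_demo, spectrum_demo, label='synthetic spectrum')
    plot_power_law(-2.0, scale_factor=1e-4, ax=ax, linestyle='--', label='$f^{-2}$')
    ax.legend()
    return fig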
def fit_power_law(freq, spectrum):
"""Fit a logarithmic spectral law based on the input one
dimensional spectrum
Parameters
----------
freq : 1darray
The frequency coordinates
spectrum : 1darray
The one-dimensional spectrum
Returns
-------
power : float
        The power characteristic of a power-law spectrum
scale_factor: float
The scale factor related to fit the power law with the input spectrum
"""
power, intercept, _, _, _ = linregress(np.log(freq), np.log(spectrum))
scale_factor = | np.exp(intercept) | numpy.exp |
"""Several tools for SAEM."""
import numpy as np
def detectLinesAlongAxis(rx, ry, axis='x', sort=True, show=False):
"""Alernative - Split data in lines for line-wise processing."""
if axis == 'x':
r = rx
elif axis == 'y':
r = ry
else:
print('Choose either *x* or *y* axis. Aborting this method ...')
return
dummy = np.zeros_like(rx, dtype=int)
line = np.zeros_like(rx, dtype=int)
li = 0
last_sign = np.sign(r[1] - r[0])
for ri in range(1, len(rx)):
sign = np.sign(r[ri] - r[ri-1])
dummy[ri-1] = li
if sign != last_sign:
li += 1
last_sign *= -1
dummy[-1] = li
if sort:
means = []
for li in np.unique(dummy):
if axis == 'x':
means.append(np.mean(ry[dummy==li], axis=0))
elif axis == 'y':
means.append(np.mean(rx[dummy==li], axis=0))
lsorted = np.argsort(means)
for li, lold in enumerate(lsorted):
line[dummy==lold] = li + 1
return line
def detectLinesByDistance(rx, ry, axis='x', sort=True, show=False,
minDist=200.):
"""Split data in lines for line-wise processing."""
dummy = np.zeros_like(rx, dtype=int)
line = np.zeros_like(rx, dtype=int)
li = 0
for ri in range(1, len(rx)):
dummy[ri-1] = li
dist = np.sqrt((rx[ri]-rx[ri-1])**2 +\
(ry[ri]-ry[ri-1])**2)
if dist > minDist:
li += 1
dummy[-1] = li
if sort:
means = []
for li in np.unique(dummy):
if axis == 'x':
means.append(np.mean(ry[dummy==li], axis=0))
elif axis == 'y':
means.append(np.mean(rx[dummy==li], axis=0))
lsorted = np.argsort(means)
for li, lold in enumerate(lsorted):
line[dummy==lold] = li + 1
return line
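# Minimal usage sketch with made-up coordinates: two parallel survey lines at
# y=0 and y=500, joined by a jump larger than minDist, are split and numbered.
def _demo_detect_lines_by_distance():
    rx = np.concatenate([np.linspace(0., 1000., 51), np.linspace(1000., 0., 51)])
    ry = np.concatenate([np.zeros(51), np.full(51, 500.)])
    line = detectLinesByDistance(rx, ry, axis='x', minDist=200.)
    print(np.unique(line))  # expected: [1 2]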
def detectLinesOld(rx, ry, show=False):
"""Split data in lines for line-wise processing."""
dt = np.sqrt(np.diff(rx)**2 + np.diff(ry)**2)
dtmin = np.median(dt) * 2
dx = np.round( | np.diff(rx) | numpy.diff |
import numpy as np
from skimage.restoration import denoise_nl_means, estimate_sigma
from skimage.exposure import rescale_intensity, match_histograms
import dask
dask.config.set(scheduler="threads")
# from dask.diagnostics import ProgressBar
def nl_means_layered(im, cores=None, axis=0, patch_size=5, patch_distance=15, h=4):
r"""
Apply the non-local means filter to each 2D layer of a stack in parallel.
This applies ``skimage.restoration.denoise_nl_means`` to each slice, so
refer to the documentation of that function for further information.
Parameters
----------
im : ndarray
The greyscale image with noise to be removed
cores : int (optional)
The number of cores to use for the processing. By default all
available cores are used.
axis : int
The axis along which slices should be taken. This should
correspond to the axis of rotation of the tomography stage, so if
the sample was rotated about the z-axis, then use ``axis=2``.
patch_size : int
Size of patches used for denoising
patch_distance : int
Maximal distance in pixels where to search patches used for
denoising.
h : float
Cut-off distance (in gray levels). The higher ``h``, the more
permissive one is in accepting patches. A higher h results in a
smoother image, at the expense of blurring features. For a
Gaussian noise of standard deviation sigma, a rule of thumb is to
        choose the value of ``h`` to be sigma or slightly less.
Notes
-----
The quality of the result depends on ``patch_size``,
``patch_distance``, ``h``, and ``sigma``. It is recommended to
experiment with a single slice first until a suitable set of
parameters is found.
Each slice in the stack is adjusted to have the same histogram and
intensity.
"""
@dask.delayed
def apply_func(func, **kwargs):
return func(**kwargs)
temp = np.copy(im)
for i in range(im.shape[2]):
temp[:, :, i] = match_histograms(temp[:, :, i], temp[:, :, 0],
multichannel=False)
p2, p98 = np.percentile(temp, (2, 98))
temp = rescale_intensity(temp, in_range=(p2, p98))
temp = temp / temp.max()
sigma_est = np.mean(estimate_sigma(temp[:, :, 0], multichannel=False))
kw = {'image': temp,
'patch_size': patch_size,
'patch_distance': patch_distance,
'h': h * sigma_est,
'multichannel': False,
'fast_mode': True}
temp = np.swapaxes(temp, 0, axis)
results = []
for i in range(im.shape[2]):
layer = temp[i, ...]
kw["image"] = layer
t = apply_func(func=denoise_nl_means, **kw)
results.append(t)
# with ProgressBar():
# ims = dask.compute(results, num_workers=cores)[0]
ims = dask.compute(results, num_workers=cores)[0]
result = np.array(ims)
result = | np.swapaxes(result, 0, axis) | numpy.swapaxes |
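# Illustrative sketch (assumes nl_means_layered returns the filtered stack, and
# a skimage version that still accepts the ``multichannel`` keyword). The array
# shape and filter parameters below are arbitrary demo values.
def _demo_nl_means_layered():
    noisy = np.random.rand(32, 32, 8)
    smooth = nl_means_layered(noisy, cores=2, axis=2, patch_size=3,
                              patch_distance=5, h=4)
    print(np.shape(smooth))  # expected: (32, 32, 8)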
# Copyright (c) 2015, <NAME>
# See LICENSE file for details: <https://github.com/moble/scri/blob/master/LICENSE>
import math
import numpy as np
import quaternion
import scri
import spherical_functions as sf
import warnings
def modes_constructor(constructor_statement, data_functor, **kwargs):
"""WaveformModes object filled with data from the input functor
Additional keyword arguments are mostly passed to the WaveformModes initializer, though some more reasonable
defaults are provided.
Parameters
----------
constructor_statement : str
This is a string form of the function call used to create the object. This is passed to the WaveformBase
initializer as the parameter of the same name. See the docstring for more information.
data_functor : function
Takes a 1-d array of time values and an array of (ell, m) values and returns the complex array of data.
t : float array, optional
Time values of the data. Default is `np.linspace(-10., 100., num=1101))`.
ell_min, ell_max : int, optional
Smallest and largest ell value present in the data. Defaults are 2 and 8.
"""
t = np.array(kwargs.pop("t", np.linspace(-10.0, 100.0, num=1101)), dtype=float)
frame = np.array(kwargs.pop("frame", []), dtype=np.quaternion)
frameType = int(kwargs.pop("frameType", scri.Inertial))
dataType = int(kwargs.pop("dataType", scri.h))
r_is_scaled_out = bool(kwargs.pop("r_is_scaled_out", True))
m_is_scaled_out = bool(kwargs.pop("m_is_scaled_out", True))
ell_min = int(kwargs.pop("ell_min", abs(scri.SpinWeights[dataType])))
ell_max = int(kwargs.pop("ell_max", 8))
if kwargs:
import pprint
warnings.warn(f"\nUnused kwargs passed to this function:\n{pprint.pformat(kwargs, width=1)}")
data = data_functor(t, sf.LM_range(ell_min, ell_max))
w = scri.WaveformModes(
t=t,
frame=frame,
data=data,
history=["# Called from constant_waveform"],
frameType=frameType,
dataType=dataType,
r_is_scaled_out=r_is_scaled_out,
m_is_scaled_out=m_is_scaled_out,
constructor_statement=constructor_statement,
ell_min=ell_min,
ell_max=ell_max,
)
return w
def constant_waveform(**kwargs):
"""WaveformModes object with constant values in each mode
Additional keyword arguments are passed to `modes_constructor`.
"""
if kwargs:
import pprint
warnings.warn(f"\nUnused kwargs passed to this function:\n{pprint.pformat(kwargs, width=1)}")
def data_functor(t, LM):
data = np.zeros((t.shape[0], LM.shape[0]), dtype=complex)
for i, m in enumerate(LM[:, 1]):
data[:, i] = m - 1j * m
return data
return modes_constructor(f"constant_waveform(**{kwargs})", data_functor)
def single_mode(ell, m, **kwargs):
"""WaveformModes object with 1 in selected slot and 0 elsewhere
Additional keyword arguments are passed to `modes_constructor`.
Parameters
----------
ell, m : int
The (ell, m) value of the nonzero mode
"""
if kwargs:
import pprint
warnings.warn(f"\nUnused kwargs passed to this function:\n{pprint.pformat(kwargs, width=1)}")
def data_functor(t, LM):
data = np.zeros((t.shape[0], LM.shape[0]), dtype=complex)
data[:, sf.LM_index(ell, m, min(LM[:, 0]))] = 1.0 + 0.0j
return data
return modes_constructor(f"single_mode({ell}, {m}, **{kwargs})", data_functor)
def random_waveform(**kwargs):
"""WaveformModes object filled with random data at each time step
Additional keyword arguments are passed to `modes_constructor`.
Parameters
----------
uniform_time : bool, optional
Use uniform, rather than random, time steps. Default is False.
begin, end : float, optional
Initial and final times of the time data. Default values are -10., 100.
n_times : int, optional
Number of time steps in the time data. Default is 1101
rotating : bool, optional
If True, use a `Corotating` frame, with random orientation at each instant. Default is True.
"""
np.random.seed(hash("random_waveform") % 4294967294)
begin = float(kwargs.pop("begin", -10.0))
end = float(kwargs.pop("end", 100.0))
n_times = int(kwargs.pop("n_times", 1101))
if kwargs.pop("uniform_time", False):
t = np.array(kwargs.pop("t", np.linspace(begin, end, num=n_times)), dtype=float)
else:
t = np.sort(np.random.uniform(begin, end, size=n_times))
rotating = bool(kwargs.pop("rotating", True))
if kwargs:
import pprint
warnings.warn(f"\nUnused kwargs passed to this function:\n{pprint.pformat(kwargs, width=1)}")
def data_functor(tin, LM):
data = np.random.normal(size=(tin.shape[0], LM.shape[0], 2)).view(complex)[:, :, 0]
return data
if rotating:
frame = np.array([np.quaternion(*np.random.uniform(-1, 1, 4)).normalized() for t_i in t])
return modes_constructor(
f"random_waveform(**{kwargs})", data_functor, t=t, frame=frame, frameType=scri.Corotating
)
else:
return modes_constructor(f"random_waveform(**{kwargs})", data_functor, t=t)
def random_waveform_proportional_to_time(**kwargs):
"""WaveformModes object filled with random data times the time coordinate
Additional keyword arguments are passed to `modes_constructor`.
Parameters
----------
uniform_time : bool, optional
Use uniform, rather than random, time steps. Default is False.
begin, end : float, optional
Initial and final times of the time data. Default values are -10., 100.
n_times : int, optional
Number of time steps in the time data. Default is 1101
rotating : bool, optional
If True, use a `Corotating` frame, with random orientation at each instant. Default is True.
"""
np.random.seed(hash("random_waveform_proportional_to_time") % 4294967294) # Use mod to get in an acceptable range
begin = float(kwargs.pop("begin", -10.0))
end = float(kwargs.pop("end", 100.0))
n_times = int(kwargs.pop("n_times", 1101))
if kwargs.pop("uniform_time", False):
t = np.array(kwargs.pop("t", np.linspace(begin, end, num=n_times)), dtype=float)
else:
t = np.sort(np.random.uniform(begin, end, size=n_times))
rotating = bool(kwargs.pop("rotating", True))
if kwargs:
import pprint
warnings.warn(f"\nUnused kwargs passed to this function:\n{pprint.pformat(kwargs, width=1)}")
def data_functor(tin, LM):
return np.outer(tin, np.random.normal(size=(LM.shape[0], 2)).view(complex)[:, 0])
if rotating:
axis = np.quaternion(0.0, *np.random.uniform(-1, 1, size=3)).normalized()
omega = 2 * np.pi * 4 / (t[-1] - t[0])
frame = np.array([np.exp(axis * (omega * t_i / 2)) for t_i in t])
return modes_constructor(
f"random_waveform(**{kwargs})", data_functor, t=t, frame=frame, frameType=scri.Corotating
)
else:
return modes_constructor(f"random_waveform(**{kwargs})", data_functor, t=t)
def single_mode_constant_rotation(**kwargs):
"""Return WaveformModes object a single nonzero mode, with phase proportional to time
The waveform output by this function will have just one nonzero mode. The behavior of that mode will be fairly
simple; it will be given by exp(i*omega*t). Note that omega can be complex, which gives damping.
Parameters
----------
s : int, optional
Spin weight of the waveform field. Default is -2.
ell, m : int, optional
The (ell, m) values of the nonzero mode in the returned waveform. Default value is (abs(s), -abs(s)).
ell_min, ell_max : int, optional
Smallest and largest ell values present in the output. Default values are abs(s) and 8.
data_type : int, optional
Default value is whichever psi_n corresponds to the input spin. It is important to choose these, rather than
`h` or `sigma` for the analytical solution to translations, which doesn't account for the direct contribution
of supertranslations (as opposed to the indirect contribution, which involves moving points around).
t_0, t_1 : float, optional
Beginning and end of time. Default values are -20. and 20.
dt : float, optional
Time step. Default value is 0.1.
omega : complex, optional
Constant of proportionality such that nonzero mode is exp(i*omega*t). Note that this can be complex, which
implies damping. Default is 0.5.
"""
s = kwargs.pop("s", -2)
ell = kwargs.pop("ell", abs(s))
m = kwargs.pop("m", -ell)
ell_min = kwargs.pop("ell_min", abs(s))
ell_max = kwargs.pop("ell_max", 8)
data_type = kwargs.pop("data_type", scri.DataType[scri.SpinWeights.index(s)])
t_0 = kwargs.pop("t_0", -20.0)
t_1 = kwargs.pop("t_1", 20.0)
dt = kwargs.pop("dt", 1.0 / 10.0)
t = np.arange(t_0, t_1 + dt, dt)
n_times = t.size
omega = complex(kwargs.pop("omega", 0.5))
data = np.zeros((n_times, sf.LM_total_size(ell_min, ell_max)), dtype=complex)
data[:, sf.LM_index(ell, m, ell_min)] = np.exp(1j * omega * t)
if kwargs:
import pprint
warnings.warn(f"\nUnused kwargs passed to this function:\n{pprint.pformat(kwargs, width=1)}")
return scri.WaveformModes(
t=t,
data=data,
ell_min=ell_min,
ell_max=ell_max,
frameType=scri.Inertial,
dataType=data_type,
r_is_scaled_out=True,
m_is_scaled_out=True,
)
def single_mode_proportional_to_time(**kwargs):
"""Return WaveformModes object a single nonzero mode, proportional to time
The waveform output by this function will have just one nonzero mode. The behavior of that mode will be
particularly simple; it will just be proportional to time.
Parameters
----------
s : int, optional
Spin weight of the waveform field. Default is -2.
ell, m : int, optional
The (ell, m) values of the nonzero mode in the returned waveform. Default value is (abs(s), -abs(s)).
ell_min, ell_max : int, optional
Smallest and largest ell values present in the output. Default values are abs(s) and 8.
data_type : int, optional
Default value is whichever psi_n corresponds to the input spin. It is important to choose these, rather than
`h` or `sigma` for the analytical solution to translations, which doesn't account for the direct contribution
of supertranslations (as opposed to the indirect contribution, which involves moving points around).
t_0, t_1 : float, optional
Beginning and end of time. Default values are -20. and 20.
dt : float, optional
Time step. Default value is 0.1.
beta : complex, optional
Constant of proportionality such that nonzero mode is beta*t. Default is 1.
"""
s = kwargs.pop("s", -2)
ell = kwargs.pop("ell", abs(s))
m = kwargs.pop("m", -ell)
ell_min = kwargs.pop("ell_min", abs(s))
ell_max = kwargs.pop("ell_max", 8)
data_type = kwargs.pop("data_type", scri.DataType[scri.SpinWeights.index(s)])
t_0 = kwargs.pop("t_0", -20.0)
t_1 = kwargs.pop("t_1", 20.0)
dt = kwargs.pop("dt", 1.0 / 10.0)
t = np.arange(t_0, t_1 + dt, dt)
n_times = t.size
beta = kwargs.pop("beta", 1.0)
data = np.zeros((n_times, sf.LM_total_size(ell_min, ell_max)), dtype=complex)
data[:, sf.LM_index(ell, m, ell_min)] = beta * t
if kwargs:
import pprint
warnings.warn(f"\nUnused kwargs passed to this function:\n{pprint.pformat(kwargs, width=1)}")
return scri.WaveformModes(
t=t,
data=data,
ell_min=ell_min,
ell_max=ell_max,
frameType=scri.Inertial,
dataType=data_type,
r_is_scaled_out=True,
m_is_scaled_out=True,
)
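# Quick usage sketch of the constructors above: build a pure (ell, m) = (2, 2)
# waveform and confirm it is the only nonzero mode. Purely illustrative.
def _demo_single_mode():
    w = single_mode(2, 2)
    nonzero_cols = np.nonzero(np.abs(w.data).sum(axis=0))[0]
    assert list(nonzero_cols) == [sf.LM_index(2, 2, w.ell_min)]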
def single_mode_proportional_to_time_supertranslated(**kwargs):
"""Return WaveformModes as in single_mode_proportional_to_time, with analytical supertranslation
This function constructs the same basic object as the `single_mode_proportional_to_time`, but then applies an
analytical supertranslation. The arguments to this function are the same as to the other, with two additions:
Additional parameters
---------------------
supertranslation : complex array, optional
Spherical-harmonic modes of the supertranslation to apply to the waveform. This is overwritten by
`space_translation` if present. Default value is `None`.
space_translation : float array of length 3, optional
This is just the 3-vector representing the displacement to apply to the waveform. Note that if
        `supertranslation` is also given, this parameter overwrites it. Default value is [1.0, 0.0, 0.0].
"""
s = kwargs.pop("s", -2)
ell = kwargs.pop("ell", abs(s))
m = kwargs.pop("m", -ell)
ell_min = kwargs.pop("ell_min", abs(s))
ell_max = kwargs.pop("ell_max", 8)
data_type = kwargs.pop("data_type", scri.DataType[scri.SpinWeights.index(s)])
t_0 = kwargs.pop("t_0", -20.0)
t_1 = kwargs.pop("t_1", 20.0)
dt = kwargs.pop("dt", 1.0 / 10.0)
t = np.arange(t_0, t_1 + dt, dt)
n_times = t.size
beta = kwargs.pop("beta", 1.0)
data = np.zeros((n_times, sf.LM_total_size(ell_min, ell_max)), dtype=complex)
data[:, sf.LM_index(ell, m, ell_min)] = beta * t
supertranslation = np.array(kwargs.pop("supertranslation", | np.array([], dtype=complex) | numpy.array |
import numpy as np
from gridworld import RLworld
import matplotlib.pyplot as plt
def get_path_length(path):
length = 0
for i in range(len(path) - 1):
if abs(path[i] - path[i + 1]) == 1:
length += 1
elif abs(path[i] - path[i + 1]) == 40:
length += 1
else:
length += 1.4
return length
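# Worked example (illustrative state indices on the 40-column grid): two
# horizontal steps, one vertical step (index difference of 40) and one diagonal
# step give a length of 1 + 1 + 1 + 1.4 = 4.4.
assert abs(get_path_length([0, 1, 2, 42, 83]) - 4.4) < 1e-9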
# ################ env settings #######################
'''
The agent receives a reward of -1 for each step taken in the environment
(note: diagonal moves are rewarded -1.4); reaching the goal yields a reward of 100.
'''
env = RLworld()
env.start = (0, 0)
env.ends = [(35, 15)]
env.refresh_setting()
# #################learning####################
Q = np.zeros([env.observation_space.n, env.action_space.n])
# Set learning parameters
lr = .6
gamma = .98
epsilon = .01
num_episodes = 3000
# create lists to contain total rewards and steps per episode
# jList = []
rList = []
path_len = []
# ###################training######################
for i in range(num_episodes):
# Reset environment and get first new observation
s = env.reset()
rAll = 0
# length = 0
path = []
done = False
j = 0
# The Q-Table learning algorithm
while j < 1000:
j += 1
# Choose an action by greedily (with noise) picking from Q table
path.append(s)
if np.random.random() < epsilon:
a = np.random.randint(0, env.action_space.n, dtype='l')
else:
a = np.argmax(Q[s, :])
# a = np.argmax(Q[s, :] + np.random.randn(1, env.action_space.n) * (1. / (i + 1)))
# Get new state and reward from environment
s1, r, done, _ = env.step(a)
# Update Q-Table with new knowledge
if np.random.random() < epsilon:
a_ = np.random.randint(0, env.action_space.n, dtype='l')
else:
            a_ = np.argmax(Q[s1, :])  # greedy action for the next state s1
Q[s, a] = Q[s, a] + lr * (r + gamma * Q[s1, a_] - Q[s, a])
rAll += r
s = s1
if done == True:
break
# jList.append(j)
rList.append(rAll)
path.append(env._xy_to_state((35, 15)))
path_len.append(get_path_length(path))
# #######################data process#############
print("Average Reward : " + str(sum(rList) / num_episodes))
print('final Reward is :', rAll)
# print("Final Q-Table Values")
# print(Q)
print("After Epoch {},path length is : {}".format(num_episodes, get_path_length(path)))
# print('after training path length is :', get_path_length(path))
# ######################routh map #################
env.get_path(path)
env.render()
# ############# reward ##########################
plt.title('reward Curve')
plt.xlabel('Epoch')
plt.ylabel('reward')
plt.plot( | np.arange(num_episodes) | numpy.arange |
# Copyright 2020 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from sklearn.preprocessing import StandardScaler
from scipy.linalg import solve_triangular
import os
import matplotlib.pyplot as plt
def fun_1d(x_in):
return 1.5*np.sin(x_in/.5) + 0.5*np.cos(x_in/.1) + x_in/8.
def generate_1d(N_tr: int = 1500, N_test: int = 200, random_gen: int = 1 ):
np.random.seed(random_gen)
x_train = 2*np.random.rand(N_tr)
x_train.sort()
y_train = fun_1d(x_train) + 0.1*np.random.randn(N_tr)
x_test = 2.4*np.random.rand(N_test) - 0.3
x_test.sort()
y_test = fun_1d(x_test)
return x_train[:, None], y_train, x_test[:, None], y_test
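# Minimal usage sketch: draw a small synthetic 1-D dataset and check the shapes.
def _demo_generate_1d():
    x_tr, y_tr, x_te, y_te = generate_1d(N_tr=100, N_test=20)
    print(x_tr.shape, y_tr.shape, x_te.shape, y_te.shape)  # (100, 1) (100,) (20, 1) (20,)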
def generate_4d(N_tr: int = 1000, N_test: int = 300, random_gen: int = 1 ):
np.random.seed(random_gen)
D = 3
eps_sq_true = .1
sigma_sq_n_true = 0.01
sigma_f = 1.5
x_train_org = np.zeros((N_tr, D ))
x_test_org = np.zeros((N_test, D ))
x_train_org[:, 0] = np.random.randn(N_tr)
x_train_org[:, 1] = 5. + 2.*np.random.randn(N_tr)
x_train_org[:, 2] = 2. + 3.*np.random.randn(N_tr)
x_test_org[:, 0] = np.random.randn(N_test)
x_test_org[:, 1] = 5. + 2.*np.random.randn(N_test)
x_test_org[:, 2] = 2. + 3.*np.random.randn(N_test)
x_train_eps = np.sqrt(eps_sq_true)*x_train_org
diag_X = np.square(x_train_eps).sum(1)
dist_X_my = diag_X[:, None] - 2.*x_train_eps@x_train_eps.T + diag_X[None, :]
K_x = sigma_f*np.exp(-dist_X_my)
K_x_hat = K_x + sigma_sq_n_true*np.eye(N_tr) # [N, N]
L_hat = np.linalg.cholesky(K_x_hat) # [N, N]
Normal_iid_tr = np.random.randn(N_tr)
y_train = L_hat@Normal_iid_tr
alpha_y = solve_triangular(L_hat, y_train, lower=True)
log_marginal_lkl_true = -.5*(np.square(alpha_y).sum() + N_tr*np.log(2*np.pi)) - np.log(np.diag(L_hat)).sum()
x_test_eps = np.sqrt(eps_sq_true)*x_test_org
diag_X_ntest = np.square(x_test_eps).sum(1)
dist_X_my_ntest = diag_X_ntest[:, None] - 2.*x_test_eps@x_test_eps.T + diag_X_ntest[None, :]
K_x_ntest = sigma_f*np.exp(-dist_X_my_ntest) + sigma_sq_n_true*np.eye(N_test)
L_hat_ntest = np.linalg.cholesky(K_x_ntest) # [N, N]
Normal_iid_test = np.random.randn(N_test)
y_test = L_hat_ntest@Normal_iid_test
x_train = np.zeros((N_tr, D + 1))
x_test = np.zeros((N_test, D + 1))
delta = np.random.rand(N_tr)
x_train[:, :D-1] = x_train_org[:, :D-1]
x_train[:, D-1] = delta*x_train_org[:, D-1]
x_train[:, D] = (1-delta)*x_train_org[:, D-1]
delta_test = np.random.rand(N_test)
x_test[:, :D-1] = x_test_org[:, :D-1]
x_test[:, D-1] = delta_test*x_test_org[:, D-1]
x_test[:, D] = (1-delta_test)*x_test_org[:, D-1]
return x_train, y_train, x_test, y_test
def load_dataset(dataset_name, train_ratio = 0.9, random_gen: int = 1):
dir_parent = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
data_all = np.load(dir_parent + '/data/' + dataset_name + '_all.npy', allow_pickle=True)
x_all, y_all = data_all[()]['x_all'], data_all[()]['y_all']
np.random.seed(random_gen)
perm = np.random.permutation(np.arange(y_all.shape[0]))
step = y_all.shape[0] - int(train_ratio*y_all.shape[0])
return x_all, y_all, perm, step
def train_test_split(x_all, y_all, perm, step, split_id):
start = split_id*step
train_ind = np.concatenate((perm[:start], perm[start+step:]))
test_ind = perm[start:start+step]
return x_all[train_ind], y_all[train_ind], x_all[test_ind], y_all[test_ind]
def standardize_dataset(dataset_name, x_train, y_train, x_test, y_test):
if dataset_name == 'house_electric':
col_standarize = np.arange(13, 19)
for col in col_standarize:
mean_col = | np.mean(x_train[:, col]) | numpy.mean |
import time
import numpy as np
from scipy import sparse
import sys
from skimage import measure
from skimage import io
from threading import Thread
import traceback
def get_matrix_filename(algorithm, variant, size):
return "radon_server/static/npz/" + algorithm + "." + variant + "." + str(size) + ".npz"
class RadonTransformThread(Thread):
def __init__(self, action="transform", variant=None, args=None, method="direct"):
if args is None:
self.args = {}
else:
self.args = args
self.should_update_progress = True
self.action = action
self.progress = 0
self.took = 0
self.cond = 0
self.startTime = time.time()
self.radon = None
self.reconstructed = None
self.variant = variant
self.ratio = self.get_matrix_ratio()
self.matrix = None
self.matrix_size = 0
self.similarity = None
self.method = method
self.error = None
self.reconstruct_multiply = self.get_reconstruct_multiply()
self.size = 0
self.started = False
super(RadonTransformThread, self).__init__()
# override those methods
def get_algorithm_name(self):
return ""
def get_matrix_ratio(self):
return 1
def get_reconstruct_multiply(self):
return 255
def run_transform(self, image, n, variant=None):
pass
def need_matrix(self):
return True
def run_build_matrix(self, n, variant):
cols = []
progress = 0
for i in range(n):
for j in range(n):
x = np.zeros((n, n), dtype=np.float64)
x[i, j] = 255
self.should_update_progress = False
self.run_transform(x, n, variant)
self.should_update_progress = True
rx = self.radon
nn = int(n * self.ratio * n * self.ratio)
col = sparse.coo_matrix(
(np.reshape(rx, (nn)), (np.arange(nn).astype("int"), np.zeros(nn, dtype='int'))))
cols.append(col)
progress += 1
self.matrix_size = sys.getsizeof(cols)
self.update_progress(progress, n * n)
self.matrix = sparse.hstack(cols)
def calculate_reconstructed_score(self):
original_image = io.imread(self.args["original_file"], as_gray=True).astype('float64') * 255
self.similarity = measure.compare_ssim(original_image, self.reconstructed,
data_range=255)
def get_matrix(self, variant, n):
# load matrix file
matrix_filename = get_matrix_filename(self.get_algorithm_name(), variant, n)
A = sparse.load_npz(matrix_filename)
AT = A.transpose()
return A, AT
def reconstruct_callback(self, xk):
# evaluate progress by comparing to the last reconstructed image
progress = measure.compare_ssim(np.reshape(xk, (self.size, self.size)),
self.reconstructed, data_range=255) * 100
if progress > self.progress:
self.progress = progress
self.took = (time.time() - self.startTime) * 1000
self.reconstructed = np.reshape(xk, (self.size, self.size)) * self.reconstruct_multiply
self.calculate_reconstructed_score()
def run_reconstruct(self, image, n, variant=None):
try:
(A, AT) = self.get_matrix(variant, n)
# reconstruct
self.size = n
R = np.reshape(image, (n * n * self.ratio * self.ratio))
if self.method == "direct":
reconstructed = sparse.linalg.spsolve(AT * A, AT * R)
elif self.method == "lsqr":
reconstructed = sparse.linalg.lsqr(A, R, atol=self.args["tolerance"], btol=self.args["tolerance"])[0]
elif self.method == "gmres":
reconstructed = \
sparse.linalg.gmres(AT * A, AT * R, tol=self.args["tolerance"], callback=self.reconstruct_callback)[
0]
elif self.method == "cg":
reconstructed = \
sparse.linalg.cgs(AT * A, AT * R, tol=self.args["tolerance"], callback=self.reconstruct_callback)[0]
elif self.method == "qmr":
reconstructed = \
sparse.linalg.qmr(AT * A, AT * R, tol=self.args["tolerance"], callback=self.reconstruct_callback)[0]
else:
raise Exception("Unsupported reconstruction method " + self.method)
self.reconstructed = np.reshape(reconstructed, (self.size, self.size)) * self.reconstruct_multiply
self.calculate_reconstructed_score()
self.update_progress(100, 100)
except Exception as e:
traceback.print_exc()
self.update_progress(100, 100)
self.error = e
def start_algorithm(self, image, n, variant, action):
try:
if action == "transform":
self.radon = np.zeros((n, n), dtype='float64')
self.run_transform(image, n, variant)
elif action == "build_matrix":
self.run_build_matrix(n, variant)
sparse.save_npz(
get_matrix_filename(self.get_algorithm_name(), self.variant, n),
self.matrix)
elif action == "reconstruct":
self.reconstructed = np.zeros((n, n), dtype='float64')
self.run_reconstruct(image, n, variant)
self.took = (time.time() - self.startTime) * 1000
except Exception as e:
traceback.print_exc()
self.update_progress(100, 100)
self.error = e
def save(self):
if self.action == "transform":
# save an image file for preview the radon transform
io.imsave(self.args["target_image"],
                      ((self.radon - np.min(self.radon)) * 255 / (np.max(self.radon) - np.min(self.radon))))
# save the radon file itself
np.save(self.args["target_file"], self.radon)
# calculate the cond value of the matrix
            # np.linalg.eig returns (eigenvalues, eigenvectors); use the eigenvalues w here
            (w, v) = np.linalg.eig(self.radon.transpose() * self.radon)
            self.cond = np.sqrt(np.max(np.real(w)) - np.min(np.real(w)))
elif self.action == "reconstruct":
io.imsave(self.args["target_file"], | np.round(self.reconstructed) | numpy.round |
import pytest
from mrmustard import *
import numpy as np
import tensorflow as tf
from thewalrus.random import random_covariance
from thewalrus.quantum import real_to_complex_displacements
from mrmustard.physics import gaussian as gp, fock as fp
from mrmustard.math import Math
math = Math()
class TestGaussianStates:
@pytest.mark.parametrize("hbar", [1 / 2, 1.0, 2.0, 1.6])
@pytest.mark.parametrize("num_modes", np.arange(5, 10))
@pytest.mark.parametrize("pure", [True, False])
@pytest.mark.parametrize("block_diag", [True, False])
def test_fidelity_is_symmetric(self, num_modes, hbar, pure, block_diag):
"""Test that the fidelity is symmetric"""
cov1 = random_covariance(num_modes, hbar=hbar, pure=pure, block_diag=block_diag)
means1 = np.sqrt(2 * hbar) * np.random.rand(2 * num_modes)
cov2 = random_covariance(num_modes, hbar=hbar, pure=pure, block_diag=block_diag)
means2 = np.sqrt(2 * hbar) * np.random.rand(2 * num_modes)
f12 = gp.fidelity(means1, cov1, means2, cov2, hbar)
f21 = gp.fidelity(means2, cov2, means1, cov1, hbar)
assert np.allclose(f12, f21)
@pytest.mark.parametrize("hbar", [1 / 2, 1.0, 2.0, 1.6])
@pytest.mark.parametrize("num_modes", np.arange(5, 10))
@pytest.mark.parametrize("pure", [True, False])
@pytest.mark.parametrize("block_diag", [True, False])
def test_fidelity_is_leq_one(self, num_modes, hbar, pure, block_diag):
"""Test that the fidelity is between 0 and 1"""
cov1 = random_covariance(num_modes, hbar=hbar, pure=pure, block_diag=block_diag)
means1 = np.sqrt(2 * hbar) * | np.random.rand(2 * num_modes) | numpy.random.rand |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 6 14:40:17 2020
@author: lukepinkel
"""
import numpy as np
import scipy as sp
def fo_fc_fd(f, x, eps=None, args=()):
if eps is None:
eps = (np.finfo(float).eps)**(1.0/3.0)
n = len(np.asarray(x))
g, h = np.zeros(n), np.zeros(n)
for i in range(n):
h[i] = eps
g[i] = (f(x+h, *args) - f(x, *args)) / eps
h[i] = 0
return g
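# Sanity-check sketch: the forward-difference gradient of f(x) = sum(x**2)
# should match the exact gradient 2*x to within the step size.
def _demo_fo_fc_fd():
    f = lambda x: np.sum(x ** 2)
    x0 = np.array([1.0, -2.0, 0.5])
    print(np.allclose(fo_fc_fd(f, x0), 2.0 * x0, atol=1e-4))  # expected: True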
def so_fc_fd(f, x, eps=None, args=()):
if eps is None:
eps = (np.finfo(float).eps)**(1.0/3.0)
n = len(np.asarray(x))
H, hi, hj = np.zeros((n, n)), np.zeros(n), np.zeros(n)
eps2 = eps**2
for i in range(n):
hi[i] = eps
for j in range(i+1):
hj[j] = eps
H[i, j] = (f(x+hi+hj, *args) - f(x+hi, *args) - f(x+hj, *args) + f(x, *args)) / eps2
H[j, i] = H[i, j]
hj[j] = 0
hi[i] = 0
return H
def so_gc_fd(g, x, eps=None, args=()):
if eps is None:
eps = (np.finfo(float).eps)**(1.0/3.0)
n = len(np.asarray(x))
H, h = np.zeros((n, n)), np.zeros(n)
gx, gxh = np.zeros((n, n)), np.zeros((n, n))
for i in range(n):
h[i] = eps
gx[i] = g(x, *args)
gxh[i] = g(x+h, *args)
h[i] = 0
for i in range(n):
for j in range(i+1):
H[i, j] = ((gxh[i, j] - gx[i, j]) + (gxh[j, i] - gx[j, i])) / (2 * eps)
H[j, i] = H[i, j]
return H
def fo_fc_cd(f, x, eps=None, args=()):
if eps is None:
eps = (np.finfo(float).eps)**(1.0/3.0)
n = len(np.asarray(x))
g, h = np.zeros(n), np.zeros(n)
for i in range(n):
h[i] = eps
g[i] = (f(x+h, *args) - f(x - h, *args)) / (2 * eps)
h[i] = 0
return g
def so_fc_cd(f, x, eps=None, args=()):
p = len(np.asarray(x))
if eps is None:
eps = ( | np.finfo(float) | numpy.finfo |
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.optim import lr_scheduler
import torch.utils.data as data
from torch.nn.utils.rnn import pack_padded_sequence as pack, pad_packed_sequence as unpack
import torchaudio
import torchaudio.transforms as tat
import numpy as np
import os
import glob
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from pytorch_audio_utils import *
parser = argparse.ArgumentParser(description='PyTorch Language ID Classifier Trainer')
parser.add_argument('--epochs', type=int, default=5,
help='upper epoch limit')
parser.add_argument('--batch-size', type=int, default=6,
help='batch size')
parser.add_argument('--window-size', type=int, default=200,
help='size of fft window')
parser.add_argument('--validate', action='store_true',
help='do out-of-bag validation')
parser.add_argument('--log-interval', type=int, default=5,
help='reports per epoch')
parser.add_argument('--load-model', type=str, default=None,
help='path of model to load')
parser.add_argument('--save-model', action='store_true',
help='path to save the final model')
parser.add_argument('--train-full-model', action='store_true',
help='train full model vs. final layer')
args = parser.parse_args()
class Preemphasis(object):
"""Perform preemphasis on signal
y = x[n] - α*x[n-1]
Args:
alpha (float): preemphasis coefficient
"""
def __init__(self, alpha=0.97):
self.alpha = alpha
def __call__(self, sig):
"""
Args:
sig (Tensor): Tensor of audio of size (Samples x Channels)
Returns:
sig (Tensor): Preemphasized. See equation above.
"""
if self.alpha == 0:
return sig
else:
sig[1:, :] -= self.alpha * sig[:-1, :]
return sig
class RfftPow(object):
"""This function emulates power of the discrete fourier transform.
Note: this implementation may not be numerically stable
Args:
K (int): number of fft freq bands
"""
def __init__(self, K=None):
self.K = K
def __call__(self, sig):
"""
Args:
sig (Tensor): Tensor of audio of size (Samples x Channels)
Returns:
S (Tensor): spectrogram
"""
N = sig.size(1)
if self.K is None:
K = N
else:
K = self.K
k_vec = torch.arange(0, K).unsqueeze(0)
n_vec = torch.arange(0, N).unsqueeze(1)
angular_pt = 2 * np.pi * k_vec * n_vec / K
S = torch.sqrt(torch.matmul(sig, angular_pt.cos())**2 + \
torch.matmul(sig, angular_pt.sin())**2)
S = S.squeeze()[:(K//2+1)]
S = (1 / K) * S**2
return S
class FilterBanks(object):
"""Bins a periodogram from K fft frequency bands into N bins (banks)
fft bands (K//2+1) -> filterbanks (n_filterbanks) -> bins (bins)
Args:
n_filterbanks (int): number of filterbanks
        bins (list): fft bin indices marking the filterbank edges (length n_filterbanks + 2)
"""
def __init__(self, n_filterbanks, bins):
self.n_filterbanks = n_filterbanks
self.bins = bins
def __call__(self, S):
"""
Args:
S (Tensor): Tensor of Spectro- / Periodogram
Returns:
fb (Tensor): binned filterbanked spectrogram
"""
conversion_factor = np.log(10) # torch.log10 doesn't exist
K = S.size(0)
fb_mat = torch.zeros((self.n_filterbanks, K))
for m in range(1, self.n_filterbanks+1):
f_m_minus = int(self.bins[m - 1])
f_m = int(self.bins[m])
f_m_plus = int(self.bins[m + 1])
fb_mat[m - 1, f_m_minus:f_m] = (torch.arange(f_m_minus, f_m) - f_m_minus) / (f_m - f_m_minus)
fb_mat[m - 1, f_m:f_m_plus] = (f_m_plus - torch.arange(f_m, f_m_plus)) / (f_m_plus - f_m)
fb = torch.matmul(S, fb_mat.t())
fb = 20 * torch.log(fb) / conversion_factor
return fb
class MFCC(object):
"""Discrete Cosine Transform
There are three types of the DCT. This is 'Type 2' as described in the scipy docs.
filterbank bins (bins) -> mfcc (mfcc)
Args:
n_filterbanks (int): number of filterbanks
n_coeffs (int): number of mfc coefficients to keep
mode (str): orthogonal transformation
"""
def __init__(self, n_filterbanks, n_coeffs, mode="ortho"):
self.n_filterbanks = n_filterbanks
self.n_coeffs = n_coeffs
self.mode = "ortho"
def __call__(self, fb):
"""
Args:
fb (Tensor): Tensor of binned filterbanked spectrogram
Returns:
mfcc (Tensor): Tensor of mfcc coefficients
"""
K = self.n_filterbanks
k_vec = torch.arange(0, K).unsqueeze(0)
n_vec = torch.arange(0, self.n_filterbanks).unsqueeze(1)
angular_pt = np.pi * k_vec * ((2*n_vec+1) / (2*K))
mfcc = 2 * torch.matmul(fb, angular_pt.cos())
if self.mode == "ortho":
mfcc[0] *= np.sqrt(1/(4*self.n_filterbanks))
mfcc[1:] *= np.sqrt(1/(2*self.n_filterbanks))
return mfcc[1:(self.n_coeffs+1)]
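# Illustrative shape check of the feature chain defined above, using dummy,
# equally spaced filterbank edges rather than the mel-spaced bins built later
# in this script; the expected shapes assume a reasonably recent PyTorch.
def _demo_feature_chain():
    n_fft_demo, n_banks, n_ceps = 64, 10, 6
    dummy_bins = torch.linspace(0, n_fft_demo // 2, n_banks + 2)
    frame = torch.randn(1, 128)  # (channels, samples)
    S = RfftPow(n_fft_demo)(frame)
    fb = FilterBanks(n_banks, dummy_bins)(S)
    mfcc = MFCC(n_banks, n_ceps)(fb)
    print(S.shape, fb.shape, mfcc.shape)  # expected: (33,) (10,) (6,)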
class Sig2Features(object):
"""Get the log power, MFCCs and 1st derivatives of the signal across n hops
and concatenate all that together
Args:
        ws (int): window (frame) size in samples
        hs (int): hop size in samples
        transformDict (dict): dict of transformations applied to each frame
"""
def __init__(self, ws, hs, transformDict):
self.ws = ws
self.hs = hs
self.td = transformDict
def __call__(self, sig):
"""
Args:
sig (Tensor): Tensor of signal
Returns:
Feats (Tensor): Tensor of log-power, 12 mfcc coefficients and 1st devs
"""
        n_hops = (sig.size(0) - self.ws) // self.hs
P = []
Mfcc = []
for i in range(n_hops):
# create frame
            st = int(i * self.hs)
            end = st + self.ws
sig_n = sig[st:end]
# get power/energy
P += [self.td["RfftPow"](sig_n.transpose(0, 1))]
# get mfccs and filter banks
fb = self.td["FilterBanks"](P[-1])
Mfcc += [self.td["MFCC"](fb)]
# concat and calculate derivatives
P = torch.stack(P, 1)
P_sum = torch.log(P.sum(0))
P_dev = torch.zeros(P_sum.size())
P_dev[1:] = P_sum[1:] - P_sum[:-1]
Mfcc = torch.stack(Mfcc, 1)
Mfcc_dev = torch.cat((torch.zeros(n_coefficients, 1), Mfcc[:,:-1] - Mfcc[:,1:]), 1)
Feats = torch.cat((P_sum.unsqueeze(0), P_dev.unsqueeze(0), Mfcc, Mfcc_dev), 0)
return Feats
class Labeler(object):
"""Labels from text to int + 1
"""
def __call__(self, labels):
return torch.LongTensor([int(l)+1 for l in labels])
def pad_packed_collate(batch):
"""Puts data, and lengths into a packed_padded_sequence then returns
the packed_padded_sequence and the labels. Set use_lengths to True
to use this collate function.
Args:
batch: (list of tuples) [(audio, target)].
audio is a FloatTensor
target is a LongTensor with a length of 8
Output:
packed_batch: (PackedSequence), see torch.nn.utils.rnn.pack_padded_sequence
labels: (Tensor), labels from the file names of the wav.
"""
if len(batch) == 1:
sigs, labels = batch[0][0], batch[0][1]
sigs = sigs.t()
lengths = [sigs.size(0)]
sigs.unsqueeze_(0)
labels.unsqueeze_(0)
if len(batch) > 1:
sigs, labels, lengths = zip(*[(a.t(), b, a.size(1)) for (a,b) in sorted(batch, key=lambda x: x[0].size(1), reverse=True)])
max_len, n_feats = sigs[0].size()
sigs = [torch.cat((s, torch.zeros(max_len - s.size(0), n_feats)), 0) if s.size(0) != max_len else s for s in sigs]
sigs = torch.stack(sigs, 0)
labels = torch.stack(labels, 0)
packed_batch = pack(Variable(sigs), lengths, batch_first=True)
return packed_batch, labels
def unpack_lengths(batch_sizes):
"""taken directly from pad_packed_sequence()
"""
lengths = []
data_offset = 0
prev_batch_size = batch_sizes[0]
for i, batch_size in enumerate(batch_sizes):
dec = prev_batch_size - batch_size
if dec > 0:
lengths.extend((i,) * dec)
prev_batch_size = batch_size
lengths.extend((i + 1,) * batch_size)
lengths.reverse()
return lengths
class EncoderRNN2(nn.Module):
def __init__(self, input_size, hidden_size, n_layers=1, batch_size=1):
super(EncoderRNN2, self).__init__()
self.n_layers = n_layers
self.hidden_size = hidden_size
self.batch_size = batch_size
self.gru = nn.GRU(input_size, hidden_size, n_layers, batch_first=True)
def forward(self, input, hidden):
output = input
output, hidden = self.gru(output, hidden)
#print("encoder:", output.size(), hidden.size())
return output, hidden
def initHidden(self, ttype=None):
if ttype == None:
ttype = torch.FloatTensor
result = Variable(ttype(self.n_layers * 1, self.batch_size, self.hidden_size).fill_(0))
if use_cuda:
return result.cuda()
else:
return result
class Attn(nn.Module):
def __init__(self, hidden_size, batch_size=1, method="dot"):
super(Attn, self).__init__()
self.method = method
self.hidden_size = hidden_size
self.batch_size = batch_size
if self.method == 'general':
self.attn = nn.Linear(self.hidden_size, hidden_size, bias=False)
elif self.method == 'concat':
self.attn = nn.Linear(self.hidden_size * 2, hidden_size, bias=False)
self.v = nn.Parameter(torch.FloatTensor(batch_size, 1, hidden_size))
def forward(self, hidden, encoder_outputs):
max_len = encoder_outputs.size(1)
# get attn energies in one batch
attn_energies = self.score(hidden, encoder_outputs)
# Normalize energies to weights in range 0 to 1
return F.softmax(attn_energies)
def score(self, hidden, encoder_output):
#print("attn.score:", hidden.size(), encoder_output.size())
if self.method == 'general':
energy = self.attn(encoder_output)
energy = energy.transpose(2, 1)
energy = hidden.bmm(energy)
return energy
elif self.method == 'concat':
hidden = hidden * Variable(encoder_output.data.new(encoder_output.size()).fill_(1)) # broadcast hidden to encoder_outputs size
energy = self.attn(torch.cat((hidden, encoder_output), -1))
energy = energy.transpose(2, 1)
energy = self.v.bmm(energy)
return energy
else:
#self.method == 'dot':
encoder_output = encoder_output.transpose(2, 1)
energy = hidden.bmm(encoder_output)
return energy
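# Illustrative sketch of the attention module in isolation: score a single
# decoder step against a length-5 encoder output using dot attention. Shapes
# follow the B x S x N convention used in the decoder below.
def _demo_attn():
    B, L, N = 2, 5, 100
    attn = Attn(N, batch_size=B, method="dot")
    dec_state = Variable(torch.randn(B, 1, N))
    enc_out = Variable(torch.randn(B, L, N))
    weights = attn(dec_state, enc_out)
    print(weights.size())  # expected: (B, 1, L)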
class LuongAttnDecoderRNN(nn.Module):
def __init__(self, hidden_size, output_size, attn_model="dot", n_layers=1, dropout=0.1, batch_size=1):
super(LuongAttnDecoderRNN, self).__init__()
# Keep for reference
self.attn_model = attn_model
self.hidden_size = hidden_size
self.output_size = output_size
self.n_layers = n_layers
self.dropout = dropout
self.batch_size = batch_size
# Define layers
self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=dropout, batch_first=True)
self.concat = nn.Linear(hidden_size * 2, hidden_size)
self.out = nn.Linear(hidden_size, output_size)
# Choose attention model
if attn_model != 'none':
self.attn = Attn(hidden_size, method=attn_model, batch_size=batch_size)
def forward(self, input_seq, last_hidden, encoder_outputs):
# Note: This now runs in batch but was originally run one
# step at a time
# B = batch size
# S = output length
# N = # of hidden features
# Get the embedding of the current input word (last output word)
batch_size = input_seq.size(0)
# Get current hidden state from input word and last hidden state
rnn_output, hidden = self.gru(input_seq, last_hidden)
# Calculate attention from current RNN state and all encoder outputs;
# apply to encoder outputs to get weighted average
#print("decoder:", rnn_output.size(), encoder_outputs.size())
attn_weights = self.attn(rnn_output, encoder_outputs)
context = attn_weights.bmm(encoder_outputs) # [B, S, L] dot [B, L, N] -> [B, S, N]
print(attn_weights.size(), encoder_outputs.size(), context.size())
#print("decoder context:", context.size())
# Attentional vector using the RNN hidden state and context vector
# concatenated together (Luong eq. 5)
concat_input = torch.cat((rnn_output, context), -1) # B x S x 2*N
concat_output = F.tanh(self.concat(concat_input))
# Finally predict next token (Luong eq. 6, without softmax)
output = self.out(concat_output)
# Return final output, hidden state, and attention weights (for visualization)
return output, hidden, attn_weights
# train parameters
epochs = args.epochs
# set dataset parameters
DATADIR = "/home/david/Programming/data"
sr = 8000
ws = args.window_size
hs = ws // 2
n_fft = 512 # 256
n_filterbanks = 26
n_coefficients = 12
low_mel_freq = 0
high_freq_mel = (2595 * np.log10(1 + (sr/2) / 700))
mel_pts = np.linspace(low_mel_freq, high_freq_mel, n_filterbanks + 2)
hz_pts = np.floor(700 * (10**(mel_pts / 2595) - 1))
bins = np.floor((n_fft + 1) * hz_pts / sr)
# data transformations
td = {
"RfftPow": RfftPow(n_fft),
"FilterBanks": FilterBanks(n_filterbanks, bins),
"MFCC": MFCC(n_filterbanks, n_coefficients),
}
transforms = tat.Compose([
tat.Scale(),
tat.PadTrim(58000, fill_value=1e-8),
Preemphasis(),
Sig2Features(ws, hs, td),
])
# set network parameters
use_cuda = torch.cuda.is_available()
batch_size = args.batch_size
input_features = 26
hidden_size = 100
output_size = 3
#output_length = (8 + 7 + 2) # with "blanks"
output_length = 8 # without blanks
n_layers = 1
attn_modus = "dot"
# build networks, criterion, optimizers, dataset and dataloader
encoder2 = EncoderRNN2(input_features, hidden_size, n_layers=n_layers, batch_size=batch_size)
decoder2 = LuongAttnDecoderRNN(hidden_size, output_size, n_layers=n_layers, attn_model=attn_modus, batch_size=batch_size)
print(encoder2)
print(decoder2)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.RMSprop([
{"params": encoder2.parameters()},
{"params": decoder2.parameters(), "lr": 0.0001}
], lr=0.001, momentum=0.9)
scheduler = lr_scheduler.StepLR(optimizer, step_size=80, gamma=0.6)
ds = torchaudio.datasets.YESNO(DATADIR, transform=transforms, target_transform=Labeler())
dl = data.DataLoader(ds, batch_size=batch_size)
if use_cuda:
print("using CUDA: {}".format(use_cuda))
encoder2 = encoder2.cuda()
decoder2 = decoder2.cuda()
loss_total = []
# begin training
for epoch in range(epochs):
scheduler.step()
print("epoch {}".format(epoch+1))
running_loss = 0
loss_epoch = []
for i, (mb, tgts) in enumerate(dl):
# set model into train mode and clear gradients
encoder2.train()
decoder2.train()
encoder2.zero_grad()
decoder2.zero_grad()
# set inputs and targets
mb = mb.transpose(2, 1) # [B x N x L] -> [B, L, N]
if use_cuda:
mb, tgts = mb.cuda(), tgts.cuda()
mb, tgts = Variable(mb), Variable(tgts)
encoder2_hidden = encoder2.initHidden(type(mb.data))
encoder2_output, encoder2_hidden = encoder2(mb, encoder2_hidden)
#print(encoder2_output)
# Prepare input and output variables for decoder
dec_i = Variable(encoder2_output.data.new([[[0] * hidden_size] * output_length] * batch_size))
dec_h = encoder2_hidden # Use last (forward) hidden state from encoder
#print(dec_h.size())
"""
# Run through decoder one time step at a time
# collect attentions
attentions = []
outputs = []
dec_i = Variable(torch.FloatTensor([[[0] * hidden_size] * 1]))
target_seq = Variable(torch.FloatTensor([[[-1] * hidden_size]*8]))
for t in range(output_length):
#print("t:", t, dec_i.size())
dec_o, dec_h, dec_attn = decoder2(
dec_i, dec_h, encoder2_output
)
#print("decoder output", dec_o.size())
dec_i = target_seq[:,t].unsqueeze(1) # Next input is current target
outputs += [dec_o]
attentions += [dec_attn]
dec_o = torch.cat(outputs, 1)
dec_attn = torch.cat(attentions, 1)
"""
# run through decoder in one shot
dec_o, dec_h, dec_attn = decoder2(dec_i, dec_h, encoder2_output)
# calculate loss and backprop
loss = criterion(dec_o.view(-1, output_size), tgts.view(-1))
running_loss += loss.data[0]
loss_epoch += [loss.data[0]]
loss.backward()
#nn.utils.clip_grad_norm(encoder2.parameters(), 0.05)
#nn.utils.clip_grad_norm(decoder2.parameters(), 0.05)
optimizer.step()
# logging stuff
if (i % args.log_interval == 0 and i != 0) or epoch == 0:
print(loss.data[0])
loss_total += [loss_epoch]
print((dec_o.max(2)[1].data == tgts.data).float().sum(1) / tgts.size(1))
print("ave loss of {} at epoch {}".format(running_loss / (i+1), epoch+1))
loss_total = | np.array(loss_total) | numpy.array |
"""
Module for processing and analysis of the geospatial graph.
See https://networkx.org/documentation/stable/index.html for graph operations.
"""
from __future__ import annotations
import bz2
import gzip
import inspect
import os
import pathlib
import pickle
from copy import deepcopy
from itertools import zip_longest
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Union
import geopandas as gpd
import networkx as nx
import numpy as np
import pandas as pd
import pyproj
import rasterio
import shapely
from shapely.prepared import prep
from tqdm import tqdm
from geograph import binary_graph_operations, metrics
from geograph.metrics import CLASS_METRICS_DICT, Metric
from geograph.utils import rasterio_utils
pd.options.mode.chained_assignment = None # default='warn'
VALID_EXTENSIONS = (
".pickle",
".pkl",
".gz",
".bz2",
".shp",
".gpkg",
".tiff",
".tif",
".geotif",
".geotiff",
)
class GeoGraph:
"""Class for the fragmentation graph."""
def __init__(
self,
data,
crs: Optional[Union[str, pyproj.CRS]] = None,
graph_save_path: Optional[Union[str, os.PathLike]] = None,
raster_save_path: Optional[Union[str, os.PathLike]] = None,
columns_to_rename: Optional[Dict[str, str]] = None,
tolerance: float = 0.0,
**kwargs,
) -> None:
"""
Class for the fragmentation graph.
This class can load a pickled networkx graph directly, or create the
graph from
- a path to vector data (.shp, .gpkg)
- a path to raster data (.tif, .tiff, .geotif, .geotiff)
- a numpy array containing raster data
- a dataframe containing polygons.
Note that the final dataframe must contain a class label column named
"class_label" and a "geometry column containing the polygon data - the
`columns_to_rename` argument allows for renaming columns to ensure this.
Warning: loading and saving GeoGraphs uses pickle. Loading untrusted
data using the pickle module is not secure as it can execute arbitrary
code. Therefore, only load GeoGraphs that come from a trusted source.
See the pickle documentation for more details:
https://docs.python.org/3/library/pickle.html
Args:
data: Can be a path to a pickle file or compressed pickle file to load
the graph from, a path to vector data in GPKG or Shapefile format,
a path to raster data in GeoTiff format, a numpy array containing raster
data, or a dataframe containing polygons.
crs (str): Coordinate reference system to set on the resulting
dataframe. Warning: whatever units of distance the CRS uses will be
the units of distance for all polygon calculations, including for
the `tolerance` argument. Using a lat-long CRS can therefore result
in incoherent output.
graph_save_path (str or pathlib.Path, optional): A path to a pickle
file to save the graph to, can be `.gz` or `.bz2`. Defaults to None,
which will not save the graph.
raster_save_path (str or pathlib.Path, optional): A path to a file
to save the polygonised raster data in. A path to a GPKG file is
recommended, but Shapefiles also work. Defaults to None, which will
not save the polygonised data.
columns_to_rename (Dict[str, str], optional): A dictionary mapping
column names in the loaded dataframe with the new names of these
columns. Use this to ensure that the dataframe has "class_label" and
"geometry" columns. Defaults to None.
tolerance (float, optional): Adds edges between neighbours that are
at most `tolerance` units apart. Defaults to 0.
**mask (np.ndarray, optional): Boolean mask that can be applied over
the polygonisation. Defaults to None.
**transform (affine.Affine, optional): Affine transformation to
apply when polygonising. Defaults to the identity transform.
**connectivity (int, optional): Use 4 or 8 pixel connectivity for
grouping pixels into features. Defaults to 4.
**apply_buffer (bool, optional): Apply shapely buffer function to
the polygons after polygonising. This can fix issues with the
polygonisation creating invalid geometries.
"""
super().__init__()
self.graph = nx.Graph()
self.habitats: Dict[str, HabitatGeoGraph] = {}
self._crs: Optional[Union[str, pyproj.CRS]] = crs
self._columns_to_rename: Optional[Dict[str, str]] = columns_to_rename
self._tolerance: float = tolerance
self.metrics: Dict[str, Metric] = {}
self.class_metrics: Dict[str, Dict[Union[str, int], Metric]] = {}
if raster_save_path is not None:
raster_save_path = pathlib.Path(raster_save_path)
if raster_save_path.suffix not in (".shp", ".gpkg"):
raise ValueError("Argument `save_path` should be a GPKG or Shapefile.")
os.makedirs(raster_save_path.parent, exist_ok=True)
load_from_graph: bool = False
# Load from disk
if isinstance(data, (str, os.PathLike)):
load_path = pathlib.Path(data)
assert load_path.exists()
# Load from saved graph
if load_path.suffix in (".pickle", ".pkl", ".gz", ".bz2"):
self.df = self._load_from_graph_path(load_path)
load_from_graph = True
# Load from saved vector data
elif load_path.suffix in (".shp", ".gpkg"):
self.df = self._load_from_vector_path(load_path)
# Load from saved raster data
elif load_path.suffix in (".tiff", ".tif", ".geotif", ".geotiff"):
self.df = self._load_from_raster_path(
load_path, raster_save_path, **kwargs
)
else:
raise ValueError(
f"""Extension {load_path.suffix} unknown.
Must be one of {VALID_EXTENSIONS}"""
)
# Load from objects in memory
# Load from dataframe
elif isinstance(data, gpd.GeoDataFrame):
self.df = self._load_from_dataframe(data, tolerance=self._tolerance)
# Load from raster array
elif isinstance(data, np.ndarray):
self.df = self._load_from_raster(data, raster_save_path, **kwargs)
else:
raise ValueError(
"""Type of `data` unknown. Must be a dataframe, numpy
array, or file path."""
)
# Save resulting graph, if we didn't load from graph.
if not load_from_graph and graph_save_path is not None:
graph_save_path = pathlib.Path(graph_save_path)
os.makedirs(graph_save_path.parent, exist_ok=True)
self.save_graph(graph_save_path)
self.components = self.get_graph_components(calc_polygons=False)
print(
f"Graph successfully loaded with {self.graph.number_of_nodes()} nodes",
f"and {self.graph.number_of_edges()} edges.",
)
def __eq__(self, o: object) -> bool:
if not isinstance(o, GeoGraph):
return False
return nx.fast_could_be_isomorphic(self.graph, o.graph)
@property
def rtree(self):
"""Return Rtree object."""
return self.df.sindex
@property
def crs(self):
"""Return crs of dataframe."""
return self.df.crs
@property
def bounds(self):
"""Return bounds of entire graph."""
return self.df.sindex.bounds
@property
def class_label(self):
"""Return class label of nodes directly from underlying numpy array.
Note: Uses `iloc` type indexing.
"""
return self.df["class_label"].values
@property
def classes(self) -> np.ndarray:
"""Return a list of the sorted, unique class labels in the graph."""
return np.unique(self.df["class_label"].values)
@property
def geometry(self):
"""Return geometry of nodes from underlying numpy array.
Note: Uses `iloc` type indexing.
"""
return self.df["geometry"].values
def _load_from_vector_path(
self,
vector_path: pathlib.Path,
load_slice=None,
) -> gpd.GeoDataFrame:
"""
Load graph and dataframe with vector data from GeoPackage or shape file.
Args:
vector_path (pathlib.Path): Path to a gpkg or shp file.
load_slice: A slice object denoting the rows of the dataframe to
load. Defaults to None, meaning load all rows.
Returns:
gpd.GeoDataFrame: The dataframe containing polygon objects.
"""
# First try to load as GeoPackage, then as Shapefile.
        if load_slice is not None:
dataframe = gpd.read_file(
vector_path, rows=load_slice, enabled_drivers=["GPKG", "ESRI Shapefile"]
)
else:
dataframe = gpd.read_file(
vector_path, enabled_drivers=["GPKG", "ESRI Shapefile"]
)
return self._load_from_dataframe(dataframe, tolerance=self._tolerance)
def _load_from_raster_path(
self,
raster_path: pathlib.Path,
save_path: Optional[pathlib.Path],
**raster_kwargs,
) -> gpd.GeoDataFrame:
"""
Load raster data from a GeoTiff file, then load graph and dataframe.
Note: Assumes that relevant data is stored in the first band (band 1)
by default.
Args:
raster_path (pathlib.Path): A path to a file of raster data in
GeoTiff format.
save_path (pathlib.Path, optional): A path to a file to save the
polygonised raster data in. A path to a GPKG file is recommended,
but Shapefiles also work.
Returns:
gpd.GeoDataFrame: The dataframe containing polygon objects.
"""
with rasterio.open(raster_path) as image:
# Load band 1 (Assumes that landcover map is in first band by default)
data = image.read(1)
return self._load_from_raster(data, save_path, **raster_kwargs)
def _load_from_raster(
self, data_array: np.ndarray, save_path: Optional[pathlib.Path], **raster_kwargs
) -> gpd.GeoDataFrame:
"""
Load raster data, polygonise, then load graph and dataframe.
The raster data should be in GeoTiff format.
Polygonisation via `rasterio.features.shapes`, which uses `gdal_polygonize`.
References:
(1) https://rasterio.readthedocs.io/en/latest/api/rasterio.features.html
(2) https://gdal.org/programs/gdal_polygonize.html
Args:
data_array (np.ndarray): 2D numpy array with the raster data.
save_path (pathlib.Path, optional): A path to a file to save the
polygonised raster data in. A path to a GPKG file is recommended,
but Shapefiles also work.
Returns:
gpd.GeoDataFrame: The dataframe containing polygon objects.
"""
vector_df = rasterio_utils.polygonise(data_array=data_array, **raster_kwargs)
if save_path is not None:
if save_path.suffix == ".gpkg":
vector_df.to_file(save_path, driver="GPKG")
else:
vector_df.to_file(save_path, driver="ESRI Shapefile")
save_path.chmod(0o664)
return self._load_from_dataframe(vector_df, tolerance=self._tolerance)
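# Illustrative sketch of the polygonisation step delegated to
# `rasterio_utils.polygonise` above, built on `rasterio.features.shapes` as in
# reference (1). This is an assumption about the helper, not its actual code;
# `transform` is an optional affine transform of the raster.
#
#   import rasterio.features
#   from shapely.geometry import shape
#
#   def polygonise_sketch(data_array, transform=rasterio.Affine.identity()):
#       records = [
#           {"class_label": value, "geometry": shape(geom)}
#           for geom, value in rasterio.features.shapes(data_array, transform=transform)
#       ]
#       return gpd.GeoDataFrame(records, geometry="geometry")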
def _load_from_graph_path(self, graph_path: pathlib.Path) -> gpd.GeoDataFrame:
"""
Load networkx graph and dataframe objects from a pickle file.
Args:
graph_path (pathlib.Path): Path to a pickle file. Can be compressed
with gzip or bz2.
Returns:
gpd.GeoDataFrame: The dataframe containing polygon objects.
"""
if graph_path.suffix == ".bz2":
with bz2.BZ2File(graph_path, "rb") as bz2_file:
data = pickle.load(bz2_file)
elif graph_path.suffix == ".gz":
with gzip.GzipFile(graph_path, "rb") as gz_file:
data = pickle.loads(gz_file.read())
else:
with open(graph_path, "rb") as file:
data = pickle.load(file)
self.graph = data["graph"]
return data["dataframe"]
def save_graph(
self,
save_path: Union[str, pathlib.Path],
overwrite: bool = False,
pickle_protocol: int = pickle.DEFAULT_PROTOCOL,
) -> None:
"""
Save graph with attributes and dataframe as pickle file. Can be compressed.
Args:
save_path (Union[pathlib.Path, str]): Path to a pickle file. Can be
compressed with gzip or bz2 by passing filenames ending in `gz` or
`bz2`.
overwrite (bool, optional): If True, an existing file at `save_path`
will be overwritten. Else throws an error. Defaults to False.
pickle_protocol (int, optional): Selects the pickle protocol that is used
for Python object serialisation. Supported protocols are explained here:
https://docs.python.org/3/library/pickle.html#data-stream-format
Defaults to pickle.DEFAULT_PROTOCOL (4 in python 3.8).
Raises:
ValueError: If `save_path` is not a pickle, gz, or bz2 file.
"""
save_path = pathlib.Path(save_path)
if not overwrite and save_path.exists():
raise UserWarning(
f"A file already exists at {save_path}. To overwrite, ",
"set the `overwrite` flag to True.",
)
if save_path.suffix not in (".pickle", ".pkl", ".gz", ".bz2"):
raise ValueError(
"Argument `save_path` should end in `.pickle`, `.pkl`, `.gz` or `.bz2` "
"to indicate a pickle file or compressed pickle file."
)
data = {"graph": self.graph, "dataframe": self.df}
if save_path.suffix == ".bz2":
with bz2.BZ2File(save_path, "wb") as bz2_file:
pickle.dump(data, bz2_file, protocol=pickle_protocol)
elif save_path.suffix == ".gz":
with gzip.GzipFile(save_path, "wb") as gz_file:
gz_file.write(pickle.dumps(data, protocol=pickle_protocol))
else:
with open(save_path, "wb") as file:
pickle.dump(data, file, protocol=pickle_protocol)
save_path.chmod(0o664)
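# Usage sketch (the path is illustrative): saving to a `.gz` path compresses the
# pickle with gzip, and the same file can be handed straight back to the GeoGraph
# constructor, which dispatches on the suffix via `_load_from_graph_path`.
#
#   geo_graph.save_graph("chernobyl_habitat.gz", overwrite=True)
#   reloaded = GeoGraph("chernobyl_habitat.gz")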
def _load_from_dataframe(
self,
df: gpd.GeoDataFrame,
tolerance: float = 0.0,
) -> gpd.GeoDataFrame:
"""
Convert geopandas dataframe to networkx graph.
This code takes around 3 minutes to run on JASMIN for the Chernobyl
habitat data.
Args:
df (gpd.GeoDataFrame): GeoDataFrame containing polygon objects from
a shape file.
tolerance (float, optional): Adds edges between neighbours that are
at most `tolerance` units apart. Defaults to 0.
Raises:
ValueError: If `tolerance` < 0, if `class_label` or `geometry` are
not columns in the dataframe.
Returns:
gpd.GeoDataFrame: The dataframe containing polygon objects.
"""
if tolerance < 0.0:
raise ValueError("`tolerance` must not be negative.")
if self._columns_to_rename is not None:
df = df.rename(columns=self._columns_to_rename)
if "class_label" not in df.columns:
raise ValueError("`class_label` must be a column in the dataframe.")
if "geometry" not in df.columns:
raise ValueError("`geometry` must be a column in the dataframe.")
# Assign crs
if df.crs is None:
df.crs = self._crs
if self._crs is not None:
df = df.to_crs(self._crs)
# Reset index to ensure consistent indices
df = df.reset_index(drop=True)
# Using this list and iterating through it is slightly faster than
# iterating through df due to the dataframe overhead
geom: List[shapely.Polygon] = df["geometry"].tolist()
# this dict maps polygon row numbers in df to a list
# of neighbouring polygon row numbers
graph_dict = {}
if tolerance > 0:
# Expand the borders of the polygons by `tolerance`
new_polygons: List[shapely.Polygon] = (
df["geometry"].buffer(tolerance).tolist()
)
# Creating nodes (=vertices) and finding neighbors
for index, polygon in tqdm(
enumerate(geom),
desc="Step 1 of 2: Creating nodes and finding neighbours",
total=len(geom),
):
if tolerance > 0:
# find the indexes of all polygons which intersect with this one
neighbours = df.sindex.query(
new_polygons[index], predicate="intersects"
)
else:
neighbours = df.sindex.query(polygon, predicate="intersects")
graph_dict[index] = neighbours
# add each polygon as a node to the graph with useful attributes
self.graph.add_node(
index,
rep_point=polygon.representative_point(),
area=polygon.area,
perimeter=polygon.length,
bounds=polygon.bounds,
)
# iterate through the dict and add edges between neighbouring polygons
for polygon_id, neighbours in tqdm(
graph_dict.items(), desc="Step 2 of 2: Adding edges"
):
for neighbour_id in neighbours:
if polygon_id != neighbour_id:
self.graph.add_edge(polygon_id, neighbour_id)
# add index name
df.index.name = "node_index"
return df
def merge_nodes(
self,
node_list: List[int],
class_label: Union[int, str],
final_index: int = None,
) -> None:
"""
Merge a list of nodes in the graph together into a single node.
This will create a node with a neighbour list and polygon which is the
union of the nodes in `node_list`.
Args:
node_list (List[int]): List of integer node indexes in the graph.
class_label (int or str): Class label for the resulting node.
final_index (int, optional): Index to assign to the resulting node.
Defaults to None, in which case it becomes the highest valid index
in the dataframe + 1.
Raises:
ValueError: If `final_index` is an existing node not in `node_list`,
or if `node_list` does not contain any existing nodes.
"""
node_list = [node for node in node_list if self.graph.has_node(node)]
if len(node_list) == 0:
raise ValueError("`node_list` must contain at least one node in the graph.")
if final_index is None:
final_index = self.df.last_valid_index() + 1
elif final_index not in node_list and self.graph.has_node(final_index):
raise ValueError(
"`final_index` cannot be an existing node that is not in `node_list`."
)
# Build set of all neighbours of nodes in node_list, excluding the
# nodes in node_list.
adjacency_set = set()
for node in node_list:
adjacency_set.update(list(self.graph.neighbors(node)))
adjacency_set -= adjacency_set.intersection(node_list)
# Build union polygon.
polygon = self.df["geometry"].loc[node_list].unary_union
# Remove nodes from graph and rows from df
self._remove_nodes(node_list)
# Add final node to graph and df
self._add_node(
final_index, adjacency_set, geometry=polygon, class_label=class_label
)
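# Usage sketch (node indices and class label are illustrative): merge three
# existing nodes into a single "forest" node whose polygon is their union.
#
#   geo_graph.merge_nodes([3, 7, 12], class_label="forest")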
def merge_classes(
self, class_list: List[Union[str, int]], new_name: Union[str, int]
) -> None:
"""
Merge multiple classes together into one by renaming the class labels.
Warning: this can be very slow when merging classes with a lot of nodes.
Args:
new_name (Union[str, int]): The new name for the combined class,
either a string or an int.
class_list (List): The list of names of class labels to combine.
Every name in the list must be in the GeoGraph.
Raises:
ValueError: If `class_list` contains a class name not already in
the GeoGraph.
"""
if not set(class_list).issubset(self.df["class_label"].unique()):
raise ValueError("`class_list` must only contain valid class names.")
# Get set of indices of nodes for the new class
node_set = set(
self.df.loc[self.df["class_label"].isin(class_list), "class_label"].index
)
# rename class labels
self.df.loc[self.df["class_label"].isin(class_list), "class_label"] = new_name
merged_neighbours = set()
while True:
num_merges = 0
for node in node_set:
# Skip iteration if node has already been merged
if node in merged_neighbours:
continue
nodes_to_merge = [node]
# Get list of neighbours of node, and append them to the merge list
# if they have the label of the new class
neighbours = self.graph[node]
for neighbour in neighbours:
if neighbour in node_set:
nodes_to_merge.append(neighbour)
# Merge nodes with neighbours where both have the label of the new
# class. Crucially, we assign the merged node an index in `node_set`.
self.merge_nodes(nodes_to_merge, class_label=new_name, final_index=node)
merged_neighbours.update(nodes_to_merge)
num_merges += 1
if num_merges == 0:
break
def add_habitat(
self,
name: str,
valid_classes: List[Union[str, int]],
barrier_classes: Optional[List[Union[str, int]]] = None,
max_travel_distance: float = 0.0,
add_distance: bool = False,
add_component_edges: bool = False,
) -> None:
"""
Create HabitatGeoGraph object and store it in habitats dictionary.
Creates a habitat subgraph of the main GeoGraph that only contains edges
between nodes in `valid_classes` as long as they are less than
`max_travel_distance` apart. All nodes which are not in `valid_classes`
are not in the resulting habitat graph. This graph is then stored as its
own HabitatGeoGraph object with all meta information.
The optional argument `barrier_classes` allows for a list of class labels
which block the path between two nodes in `valid_classes`.
Warning: The current pathfinding code is experimental and will only
remove an edge between two classes within the max travel distance if the
barrier class node in the middle *completely* blocks the path, which is
rare. A full version of the pathfinding code is currently under development
and will become available in a later release.
Warning: In a large dataset, passing values to `barrier_classes` will often
make this function significantly slower, up to an order of magnitude.
Args:
name (str): The name of the habitat.
valid_classes (List): A list of class labels which make up the habitat.
barrier_classes (List): A list of class labels which are barrier classes.
The program will check if there are any nodes with a barrier class
label completely blocking the path between two nodes less than
`max_travel_distance` apart, and if so the edge will not be added.
Note that this is only applicable in rare cases - in many instances
the path will not be completely blocked and therefore the result
will be the same as if there were no barrier classes.
Defaults to None.
max_travel_distance (float): The maximum distance the animal(s) in
the habitat can travel through non-habitat areas. The habitat graph
will contain edges between any two nodes that have a class label in
`valid_classes`, as long as they are less than `max_travel_distance`
units apart. Defaults to 0, which will only create edges between
directly neighbouring areas.
add_distance (bool, optional): Whether or not to add the distance
between polygons as an edge attribute in the habitat graph. Defaults
to False.
add_component_edges (bool, optional): Whether to add edges between
nodes in the ComponentGeoGraph (which is automatically created as an
attribute of the resulting HabitatGeoGraph) with edge weights that
are the distance between neighbouring components. Can be
computationally expensive. Defaults to False.
Raises:
ValueError: If max_travel_distance < 0.
"""
if max_travel_distance < 0.0:
raise ValueError("`max_travel_distance` must not be negative.")
if barrier_classes is None:
barrier_classes = []
hgraph: nx.Graph = deepcopy(self.graph)
# Remove all edges in the graph, then at the end we only have edges
# between nodes less than `max_travel_distance` apart
hgraph.clear_edges()
# Get dict to convert between iloc indexes and loc indexes
# These are different only if nodes have been removed from the df
idx_dict: Dict[int, int] = dict(zip(range(len(self.df)), self.df.index.values))
# Get dicts of polygons and buff polygons to avoid repeatedly querying
# the dataframe. These dicts accept loc indexes
polygons: Dict[int, shapely.Polygon] = self.df["geometry"].to_dict()
if max_travel_distance > 0:
# Vectorised buffer on the entire df to calculate the expanded polygons
# used to get intersections.
buff_polygons = self.df["geometry"].buffer(max_travel_distance).to_dict()
# Remove non-habitat nodes from habitat graph
# np.where is very fast here and gets the iloc based indexes
# Combining it with the set comprehension reduces time by an order of
# magnitude compared to `set(self.df.loc[])`
valid_class_bool = np.isin(self.class_label, valid_classes)
invalid_idx = {idx_dict[i] for i in np.where(~valid_class_bool)[0]}
hgraph.remove_nodes_from(invalid_idx)
# get list of barrier node indices (iloc)
barrier_indices = set(np.where( | np.isin(self.class_label, barrier_classes) | numpy.isin |
import argparse
import sys, os
import imageio
import tensorflow as tf
import Classification_BatchDataset
import TensorflowUtils as utils
import pickle
import time
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dropout, Flatten, Dense, Input, Lambda
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import Accuracy
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import regularizers
from tensorflow.keras.regularizers import l2
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import Callback
from sklearn.utils import shuffle
import numpy as np
import numpy.random as rng
FLAGS = None
def loadimgs(path,n = 0):
'''
path => Path of train directory or test directory
'''
X=[]
y = []
cat_dict = {}
lang_dict = {}
curr_y = n
# we load every alphabet separately so we can isolate them later
for alphabet in os.listdir(path):
print("loading alphabet: " + alphabet)
lang_dict[alphabet] = [curr_y,None]
alphabet_path = os.path.join(path,alphabet)
# every letter/category has its own column in the array, so load separately
for letter in os.listdir(alphabet_path):
cat_dict[curr_y] = (alphabet, letter)
category_images=[]
letter_path = os.path.join(alphabet_path, letter)
# read all the images in the current category
dirlist = os.listdir(letter_path)
if len(dirlist)>1:
for filename in dirlist:
image_path = os.path.join(letter_path, filename)
image = imageio.imread(image_path)
category_images.append(image)
# print(len(category_images))
y.append(curr_y)
try:
uu = np.stack(category_images)
X.append(uu)
# edge case - last one
except ValueError as e:
print(e)
print("error - category_images:", category_images)
print(letter)
curr_y += 1
lang_dict[alphabet][1] = curr_y - 1
y = np.vstack(y)
X = np.stack(X)
return X,y,lang_dict
# def initialize_weights(shape, name=None):
# """
# The paper, http://www.cs.utoronto.ca/~gkoch/files/msc-thesis.pdf
# suggests to initialize CNN layer weights with mean as 0.0 and standard deviation of 0.01
# """
# return tf.random.normal(shape, mean = 0.0, stddev = 0.01)
#
# def initialize_bias(shape, name=None):
# """
# The paper, http://www.cs.utoronto.ca/~gkoch/files/msc-thesis.pdf
# suggests to initialize CNN layer bias with mean as 0.5 and standard deviation of 0.01
# """
# return tf.random.normal(shape, mean = 0.5, stddev = 0.01)
def get_siamese_model(input_shape):
"""
Model architecture based on the one provided in: http://www.cs.utoronto.ca/~gkoch/files/msc-thesis.pdf
"""
# Define the tensors for the two input images
left_input = Input(input_shape)
right_input = Input(input_shape)
initialize_weights = tf.keras.initializers.RandomNormal(mean=0., stddev=0.01)
initialize_bias = tf.keras.initializers.RandomNormal(mean=0.5, stddev=0.01)
# Convolutional Neural Network
model = Sequential([
Conv2D(64, (10,10), activation='relu', input_shape=input_shape,
kernel_initializer=initialize_weights, kernel_regularizer=l2(2e-4)),
MaxPool2D(),
Conv2D(128, (7,7), activation='relu',
kernel_initializer=initialize_weights,
bias_initializer=initialize_bias, kernel_regularizer=l2(2e-4)),
MaxPool2D(),
Conv2D(128, (4,4), activation='relu', kernel_initializer=initialize_weights,
bias_initializer=initialize_bias, kernel_regularizer=l2(2e-4)),
MaxPool2D(),
Conv2D(256, (4,4), activation='relu', kernel_initializer=initialize_weights,
bias_initializer=initialize_bias, kernel_regularizer=l2(2e-4)),
Flatten(),
Dense(4096, activation='sigmoid',
kernel_regularizer=l2(1e-3),
kernel_initializer=initialize_weights,bias_initializer=initialize_bias)
])
# Generate the encodings (feature vectors) for the two images
encoded_l = model(left_input)
encoded_r = model(right_input)
# Add a customized layer to compute the absolute difference between the encodings
L1_layer = Lambda(lambda tensors:tf.math.abs(tensors[0] - tensors[1]))
L1_distance = L1_layer([encoded_l, encoded_r])
# Add a dense layer with a sigmoid unit to generate the similarity score
prediction = Dense(1,activation='sigmoid',bias_initializer=initialize_bias)(L1_distance)
# Connect the inputs with the outputs
siamese_net = Model(inputs=[left_input,right_input],outputs=prediction)
# return the model
return siamese_net
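# A minimal usage sketch for the model factory above. The 105x105x1 input shape
# matches the Omniglot images used in the referenced thesis; the optimizer and
# learning rate here are illustrative choices, not taken from this script.
def build_demo_siamese_model():
    model = get_siamese_model((105, 105, 1))
    model.compile(loss="binary_crossentropy", optimizer=Adam(learning_rate=6e-5))
    return model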
def get_batch(batch_size,s="train"):
"""Create batch of n pairs, half same class, half different class"""
if s == 'train':
X = Xtrain
categories = train_classes
else:
X = Xval
categories = val_classes
n_classes, n_examples, h, w = X.shape
# randomly sample several classes to use in the batch
categories = rng.choice(n_classes,size=(batch_size,),replace=False)
# initialize 2 empty arrays for the input image batch
pairs=[np.zeros((batch_size, h, w,1)) for i in range(2)]
# initialize vector for the targets
targets= | np.zeros((batch_size,)) | numpy.zeros |
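    # Typical continuation (sketch, not the original code): the second half of
    # the batch is labelled as same-class pairs and images are drawn accordingly.
    #   targets[batch_size // 2:] = 1
    #   for i in range(batch_size):
    #       category = categories[i]
    #       idx_1 = rng.randint(0, n_examples)
    #       pairs[0][i, :, :, :] = X[category, idx_1].reshape(h, w, 1)
    #       # same class for the second half of the batch, a different class otherwise
    #       category_2 = category if i >= batch_size // 2 else (category + rng.randint(1, n_classes)) % n_classes
    #       idx_2 = rng.randint(0, n_examples)
    #       pairs[1][i, :, :, :] = X[category_2, idx_2].reshape(h, w, 1)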
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gps_building_blocks.ml.diagnostics.binary_classification."""
from absl.testing import absltest
import numpy as np
import pandas as pd
import sklearn.metrics
from absl.testing import parameterized
from gps_building_blocks.ml.diagnostics import binary_classification
TEST_DATA = pd.DataFrame({
'label': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'prediction': [
0.7, 0.63, 0.4, 0.77, 0.45, 0.8, 0.41, 0.82, 0.7, 0.6, 0.5, 0.45, 0.74,
0.11, 0.21, 0.05, 0.67, 0.79, 0.60, 0.10
],
'num_feature_1': [
20, 30, 22.5, 19, 30, 32, 15.6, 17.87, 25.45, 17.3, 30.2, 33, 27.5,
25.1, 35.6, 33.26, 38.5, 31.23, 28.44, 30.32
],
'cat_feature_1': [
'M', 'M', 'F', 'M', 'M', 'F', 'M', 'M', 'F', 'F', 'F', 'F', 'M', 'M',
'F', 'M', 'F', 'M', 'M', 'F'
]
})
class BinaryClassificationDiagnosticsTest(parameterized.TestCase,
absltest.TestCase):
def test_calc_performance_metrics_returns_correct_values(self):
expected_results = {
'prop_positives': 0.5000,
'auc_roc': 0.7100,
'auc_pr': 0.7278,
'binarize_threshold': 0.5000,
'accuracy': 0.6500,
'true_positive_rate': 0.7000,
'true_negative_rate': 0.6000,
'precision': 0.6364,
'f1_score': 0.6667
}
results = (
binary_classification.calc_performance_metrics(
labels=np.array(TEST_DATA['label']),
probability_predictions=np.array(TEST_DATA['prediction'])))
self.assertDictEqual(expected_results, results)
def test_resulted_bin_metrics_does_not_contain_nas(self):
results = (
binary_classification.calc_bin_metrics(
labels=np.array(TEST_DATA['label']),
probability_predictions=np.array(TEST_DATA['prediction']),
number_bins=3))
self.assertFalse(results.isna().values.any())
def test_calc_bin_metrics_returns_correct_values(self):
bin_number = [1, 2, 3]
bin_size = [7, 5, 8]
positive_instances = [5, 2, 3]
precision = [0.7143, 0.4000, 0.3750]
prop_positives = [0.5000, 0.5000, 0.5000]
precision_uplift = [1.4286, 0.8000, 0.7500]
coverage = [0.5000, 0.2000, 0.3000]
results = (
binary_classification.calc_bin_metrics(
labels=np.array(TEST_DATA['label']),
probability_predictions=np.array(TEST_DATA['prediction']),
number_bins=3))
self.assertListEqual(results['bin_number'].tolist(), bin_number)
self.assertListEqual(results['bin_size'].tolist(), bin_size)
self.assertListEqual(results['positive_instances'].tolist(),
positive_instances)
self.assertListEqual(results['precision'].tolist(), precision)
self.assertListEqual(results['prop_positives'].tolist(), prop_positives)
self.assertListEqual(results['precision_uplift'].tolist(), precision_uplift)
self.assertListEqual(results['coverage'].tolist(), coverage)
def test_plot_bin_metrics_returns_bar_plots_with_correct_elements(self):
bin_metrics = (
binary_classification.calc_bin_metrics(
labels=np.array(TEST_DATA['label']),
probability_predictions=np.array(TEST_DATA['prediction']),
number_bins=3))
x_data = list(bin_metrics['bin_number'])
y_data_precision = list(bin_metrics['precision'])
y_data_precision_uplift = list(bin_metrics['precision_uplift'])
y_data_coverage = list(bin_metrics['coverage'])
plots = binary_classification.plot_bin_metrics(bin_metrics)
plot_1 = plots[0]
plot_2 = plots[1]
plot_3 = plots[2]
with self.subTest(name='test the elements of precision bar plot'):
self.assertListEqual(
x_data, [int(tick.get_text()) for tick in plot_1.get_xticklabels()])
self.assertListEqual(y_data_precision,
[h.get_height() for h in plot_1.patches])
with self.subTest(name='test the elements of precision uplift bar plot'):
self.assertListEqual(
x_data, [int(tick.get_text()) for tick in plot_2.get_xticklabels()])
self.assertListEqual(y_data_precision_uplift,
[h.get_height() for h in plot_2.patches])
with self.subTest(name='test the elements of coverage bar plot'):
self.assertListEqual(
x_data, [int(tick.get_text()) for tick in plot_3.get_xticklabels()])
self.assertListEqual(y_data_coverage,
[h.get_height() for h in plot_3.patches])
def test_calc_cumulative_bin_metrics_returns_correct_values(self):
cumulative_bin_number = [1, 2, 3]
bin_size = [7, 13, 20]
bin_size_proportion = [0.3500, 0.6500, 1.0000]
positive_instances = [5, 7, 10]
precision = [0.7143, 0.5385, 0.5000]
coverage = [0.5000, 0.7000, 1.0000]
prop_label_positives = [0.5000, 0.5000, 0.5000]
precision_uplift = [1.4286, 1.0770, 1.0000]
results = (
binary_classification.calc_cumulative_bin_metrics(
labels=np.array(TEST_DATA['label']),
probability_predictions=np.array(TEST_DATA['prediction']),
number_bins=3))
self.assertListEqual(results['cumulative_bin_number'].tolist(),
cumulative_bin_number)
self.assertListEqual(results['bin_size'].tolist(), bin_size)
self.assertListEqual(results['bin_size_proportion'].tolist(),
bin_size_proportion)
self.assertListEqual(results['positive_instances'].tolist(),
positive_instances)
self.assertListEqual(results['precision'].tolist(), precision)
self.assertListEqual(results['coverage (recall)'].tolist(), coverage)
self.assertListEqual(results['prop_label_positives'].tolist(),
prop_label_positives)
self.assertListEqual(results['precision_uplift'].tolist(), precision_uplift)
def test_plot_cumulative_bin_metrics_returns_correct_plots(self):
cumulative_bin_metrics = (
binary_classification.calc_cumulative_bin_metrics(
labels=np.array(TEST_DATA['label']),
probability_predictions=np.array(TEST_DATA['prediction']),
number_bins=3))
x_data = list(cumulative_bin_metrics['cumulative_bin_number'])
y_data_precision = list(cumulative_bin_metrics['precision'])
y_data_precision_uplift = list(cumulative_bin_metrics['precision_uplift'])
y_data_coverage = list(cumulative_bin_metrics['coverage (recall)'])
plots = (
binary_classification.plot_cumulative_bin_metrics(
cumulative_bin_metrics))
plot_1 = plots[0]
plot_2 = plots[1]
plot_3 = plots[2]
with self.subTest(name='test the elements of precision bar plot'):
self.assertListEqual(
x_data, [int(tick.get_text()) for tick in plot_1.get_xticklabels()])
self.assertListEqual(y_data_precision,
[h.get_height() for h in plot_1.patches])
with self.subTest(name='test the elements of precision uplift bar plot'):
self.assertListEqual(
x_data, [int(tick.get_text()) for tick in plot_2.get_xticklabels()])
self.assertListEqual(y_data_precision_uplift,
[h.get_height() for h in plot_2.patches])
with self.subTest(name='test the elements of coverage bar plot'):
self.assertListEqual(
x_data, [int(tick.get_text()) for tick in plot_3.get_xticklabels()])
self.assertListEqual(y_data_coverage,
[h.get_height() for h in plot_3.patches])
def test_plot_binned_features_return_plots_with_correct_elements(self):
number_bins = 3
prediction_column_name = 'prediction'
# prepare results
test_data = TEST_DATA.sort_values(
by=prediction_column_name, ascending=False)
test_data['bin_number'] = (
number_bins -
pd.qcut(test_data[prediction_column_name], q=number_bins, labels=False))
# stats for the numerical feature
num_binned_test_data = test_data[['bin_number', 'num_feature_1']]
num_binned_test_data = num_binned_test_data.rename(
columns={'num_feature_1': 'v'})
num_bin_stats = (
num_binned_test_data[['bin_number',
'v']].groupby('bin_number',
as_index=False).agg('mean'))
num_bin_stats.columns = ['bin_number', 'var_mean']
# stats for the categorical feature
cat_binned_test_data = test_data[['bin_number', 'cat_feature_1']]
cat_binned_test_data = cat_binned_test_data.rename(
columns={'cat_feature_1': 'categories'})
bin_counts = (
cat_binned_test_data.groupby('bin_number', as_index=False).count())
bin_counts.columns = ['bin_number', 'total_count']
cat_binned_test_data['temp_column'] = 1
bin_category_counts = (
cat_binned_test_data.groupby(['bin_number', 'categories'],
as_index=False).count())
bin_category_counts.columns = ['bin_number', 'categories', 'count']
cat_bin_stats = pd.merge(bin_category_counts, bin_counts, on='bin_number')
cat_bin_stats['proportion'] = (
round((cat_bin_stats['count'] / cat_bin_stats['total_count']) * 100, 5))
cat_bin_stats = cat_bin_stats.sort_values('categories')
num_plot, cat_plot = (
binary_classification.plot_binned_features(
data=TEST_DATA,
number_bins=number_bins,
prediction_column_name=prediction_column_name,
feature_names=('num_feature_1', 'cat_feature_1'),
feature_types=('numerical', 'categorical')))
with self.subTest(name='test the elements of numerical feature plot'):
self.assertEqual('num_feature_1', num_plot.get_title())
self.assertListEqual(
list(num_bin_stats['bin_number']),
[int(tick.get_text()) for tick in num_plot.get_xticklabels()])
self.assertListEqual(
list(num_bin_stats['var_mean']),
[h.get_height() for h in num_plot.patches])
with self.subTest(name='test the elements of categorical feature plot'):
self.assertEqual('cat_feature_1', cat_plot.get_title())
self.assertListEqual(
list(set(cat_bin_stats['bin_number'])),
[int(tick.get_text()) for tick in cat_plot.get_xticklabels()])
self.assertListEqual(
list(cat_bin_stats['proportion']),
[round(h.get_height(), 5) for h in cat_plot.patches])
def test_plot_predicted_probabilities(self):
plot = binary_classification.plot_predicted_probabilities(
labels=np.array(TEST_DATA['label']),
probability_predictions=np.array(TEST_DATA['prediction']),
colors=('b', 'g'),
print_stats=True,
fig_width=20,
fig_height=15)
with self.subTest(name='test the title of plot'):
self.assertEqual('Distribution of predicted probabilities',
plot.get_title())
with self.subTest(name='test the label of the plot'):
preds_plot0 = TEST_DATA[TEST_DATA['label'] == 0]['prediction']
preds_plot1 = TEST_DATA[TEST_DATA['label'] == 1]['prediction']
expect_legends = [
'class[%s]' % (str(0)) + ': mean=%.4f, std=%.4f, median=%.4f' %
(np.mean(preds_plot0), np.std(preds_plot0), np.median(preds_plot0)),
'class[%s]' % (str(1)) + ': mean=%.4f, std=%.4f, median=%.4f' %
(np.mean(preds_plot1), np.std(preds_plot1), np.median(preds_plot1))
]
actual_legends = [l.get_text() for l in plot.get_legend().get_texts()]
self.assertListEqual(expect_legends, actual_legends)
@parameterized.named_parameters(
dict(
testcase_name='test_plot_roc_curve',
plot_name='roc',
print_stats=True,
fig_width=10,
fig_height=10,
curve_color='b'),
dict(
testcase_name='test_plot_pr_curve',
plot_name='precision-recall',
print_stats=True,
fig_width=10,
fig_height=10,
curve_color='b'))
def test_plots(self, plot_name, print_stats, fig_width, fig_height,
curve_color):
if plot_name == 'roc':
plot = binary_classification.plot_roc_curve(
labels=np.array(TEST_DATA['label']),
probability_predictions= | np.array(TEST_DATA['prediction']) | numpy.array |
# -*- coding: utf-8 -*-
"""Test for panel robust covariance estimators after pooled ols
this follows the example from xtscc paper/help
Created on Tue May 22 20:27:57 2012
Author: <NAME>
"""
from statsmodels.compat.python import lmap
import numpy as np
from numpy.testing import assert_almost_equal
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.tools import add_constant
import statsmodels.stats.sandwich_covariance as sw
def test_panel_robust_cov():
import statsmodels.datasets.grunfeld as gr
from .results.results_panelrobust import results as res_stata
dtapa = gr.data.load_pandas()
#Stata example/data seems to miss last firm
dtapa_endog = dtapa.endog[:200]
dtapa_exog = dtapa.exog[:200]
res = OLS(dtapa_endog, add_constant(dtapa_exog[['value', 'capital']],
prepend=False)).fit()
#time indicator in range(max Ti)
time = np.asarray(dtapa_exog[['year']])
time -= time.min()
time = np.squeeze(time).astype(int)
#sw.cov_nw_panel requires bounds instead of index
tidx = [(i*20, 20*(i+1)) for i in range(10)]
#firm index in range(n_firms)
firm_names, firm_id = np.unique(np.asarray(dtapa_exog[['firm']], 'S20'),
return_inverse=True)
#panel newey west standard errors
cov = sw.cov_nw_panel(res, 0, tidx, use_correction='hac')
#dropping numpy 1.4 soon
#np.testing.assert_allclose(cov, res_stata.cov_pnw0_stata, rtol=1e-6)
assert_almost_equal(cov, res_stata.cov_pnw0_stata, decimal=4)
cov = sw.cov_nw_panel(res, 1, tidx, use_correction='hac')
#np.testing.assert_allclose(cov, res_stata.cov_pnw1_stata, rtol=1e-6)
assert_almost_equal(cov, res_stata.cov_pnw1_stata, decimal=4)
cov = sw.cov_nw_panel(res, 4, tidx) #check default
#np.testing.assert_allclose(cov, res_stata.cov_pnw4_stata, rtol=1e-6)
assert_almost_equal(cov, res_stata.cov_pnw4_stata, decimal=4)
#cluster robust standard errors
cov_clu = sw.cov_cluster(res, firm_id)
assert_almost_equal(cov_clu, res_stata.cov_clu_stata, decimal=4)
#cluster robust standard errors, non-int groups
cov_clu = sw.cov_cluster(res, lmap(str, firm_id))
assert_almost_equal(cov_clu, res_stata.cov_clu_stata, decimal=4)
#Driscoll and Kraay panel robust standard errors
rcov = sw.cov_nw_groupsum(res, 0, time, use_correction=0)
assert_almost_equal(rcov, res_stata.cov_dk0_stata, decimal=4)
rcov = sw.cov_nw_groupsum(res, 1, time, use_correction=0)
assert_almost_equal(rcov, res_stata.cov_dk1_stata, decimal=4)
rcov = sw.cov_nw_groupsum(res, 4, time) #check default
| assert_almost_equal(rcov, res_stata.cov_dk4_stata, decimal=4) | numpy.testing.assert_almost_equal |
import numpy as np
import pandas as pd
import xarray as xr
from scipy.stats import t
class oat:
"""
Ordinal adequacy tests (OATs).
Attributes
----------
schedule_pos : list
Schedules whose scores are counted as positive.
behav_score_pos : behav_score object
Behavioral score used for positive schedules.
schedule_neg : list
Schedules whose scores are counted as negative.
behav_score_neg : behav_score object or None
Behavioral score used for negative schedules.
Methods
-------
compute_total(self, data_dict)
Compute total OAT score (contrast between schedules, i.e. groups).
conf_interval(self, data, conf_level = 0.95)
Compute OAT score confidence interval.
mean_resp(self, data)
Compute means of the responses used in computing the OAT.
"""
def __init__(self, schedule_pos, behav_score_pos, schedule_neg = None, behav_score_neg = None):
"""
Parameters
----------
schedule_pos : list
Schedules whose scores are counted as positive.
behav_score_pos : behav_score object
Behavioral score used for positive schedules.
schedule_neg : list or None, optional
Schedules whose scores are counted as negative.
Defaults to None.
behav_score_neg : behav_score object or None, optional
Behavioral score used for negative schedules.
Defaults to None (if there are no negative schedules)
or otherwise to the same behavioral score as
for the positive schedules.
"""
self.schedule_pos = schedule_pos
self.behav_score_pos = behav_score_pos
self.schedule_neg = schedule_neg
if not schedule_neg is None:
if behav_score_neg is None:
self.behav_score_neg = behav_score_pos
else:
self.behav_score_neg = behav_score_neg
else:
self.behav_score_neg = None
def compute_total(self, data):
"""
Compute OAT score (contrast between schedules, i.e. groups).
Parameters
----------
data : dict
Dictionary of behavioral data. Each element is an xarray dataset from a different schedule.
The keys are schedule names.
Returns
-------
total : float
Difference between mean behavioral scores of positive and negative schedules,
or mean behavioral score (if there are no negative schedules).
"""
# positive schedules
pos_scores = np.array([])
for s in self.schedule_pos:
pos_scores = np.append(pos_scores, self.behav_score_pos.compute_scores(ds = data[s]))
pos_mean = np.mean(pos_scores)
if not self.schedule_neg is None:
# negative schedules
neg_scores = np.array([])
for s in self.schedule_neg:
neg_scores = np.append(neg_scores, self.behav_score_neg.compute_scores(ds = data[s]))
neg_mean = np.mean(neg_scores)
total = pos_mean - neg_mean
else:
total = pos_mean
return total
def conf_interval(self, data, conf_level = 0.95):
"""
Compute OAT score confidence interval.
Parameters
----------
data : dataset or dict
Either an xarray dataset from a single experimental schedule, or a dictionary of such
datasets (with keys that are schedule names).
conf_level : float, optional
Confidence level of the interval. Defaults to 0.95.
Returns
-------
interval : dict
lower : float
center : float
upper : float
Notes
-----
Confidence intervals are constructed using Student's t distribution.
If there are only positive schedules, we get a one sample confidence interval for the mean.
If there are both positive and negative schedules, we get a confidence interval for the
mean difference (positive schedules - negative schedules).
"""
# Deal with case where input is a single dataset, rather than a dictionary of datasets.
if type(data) is dict:
data_dict = data
else:
data_dict = {self.schedule_pos[0] : data} # make a dict containing the input data
# positive schedules
pos_scores = np.array([])
for s in self.schedule_pos:
pos_scores = np.append(pos_scores, self.behav_score_pos.compute_scores(ds = data[s]))
pos_mean = np.mean(pos_scores)
pos_var = np.var(pos_scores)
pos_df = len(pos_scores) - 1
alpha = 1 - conf_level
if not self.schedule_neg is None:
# two sample interval (mean difference)
neg_scores = np.array([])
for s in self.schedule_neg:
neg_scores = np.append(neg_scores, self.behav_score_neg.compute_scores(ds = data[s]))
neg_mean = np.mean(neg_scores)
neg_var = np.var(neg_scores)
neg_df = len(neg_scores) - 1
pooled_var = (pos_var*pos_df + neg_var*neg_df)/(pos_df + neg_df)
sem = np.sqrt(pooled_var)*np.sqrt(1/(pos_df + 1) + 1/(neg_df + 1))
mu = pos_mean - neg_mean
else:
# one sample interval
neg_df = 0
sem = np.sqrt(pos_var/pos_df)
mu = pos_mean
t_crit = t.ppf(q = 1 - alpha/2, df = pos_df + neg_df, loc = 0, scale = 1)
lower = mu - sem*t_crit
upper = mu + sem*t_crit
interval = {'lower' : lower, 'mean' : mu, 'upper' : upper}
return interval
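# Numeric sanity check of the interval construction above (values are made up):
# with 10 positive scores, mean 0.3 and population variance 0.04, the one-sample
# interval is mean +/- t_{0.975, df=9} * sqrt(0.04 / 9).
#   from scipy.stats import t as t_dist
#   half_width = t_dist.ppf(0.975, df=9) * np.sqrt(0.04 / 9)
#   interval = (0.3 - half_width, 0.3 + half_width)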
def mean_resp(self, data):
"""
Compute means of the behavior used in computing the OAT
averaged across individuals and time steps.
Parameters
----------
data: dataframe or dict
Either an xarray dataset from a single experimental schedule, or a dictionary of such
datasets (with keys that are schedule names).
Returns
-------
mean_resp: dataframe
Mean responses for relevant trial types.
"""
# Deal with case where input is a single dataset, rather than a dictionary of datasets.
if type(data) is dict:
data_dict = data
else:
data_dict = {self.schedule_pos[0] : data} # make a dict containing the input data
# ** positive schedules **
# relevant trial names (i.e. trial types)
if self.behav_score_pos.trial_neg is None:
trial_name = np.unique(self.behav_score_pos.trial_pos)
else:
trial_name = | np.unique(self.behav_score_pos.trial_pos + self.behav_score_pos.trial_neg) | numpy.unique |
import os
import numpy as np
def gettype(location):
with open(location + "/type", "r") as fopen:
type_pokemon = fopen.read().split("\n")
type_pokemon = [i.split("\t")[4:] for i in type_pokemon]
for i in range(len(type_pokemon)):
if len(type_pokemon[i]) == 1:
type_pokemon[i].append("none")
type_pokemon = | np.array(type_pokemon) | numpy.array |
import numpy as np
from typing import List, Tuple
from numba import njit, prange
from gym_rubiks_cube.envs.functions import RotationMatrix3D, getQuadrant
import gym_rubiks_cube.envs.objects3D as o3
@njit
def rasterizeBottomFlatTriangle(v1, v2, v3, width, height):
"""assumes v1.y < v2.y = v3.y"""
object_map = np.ones((width, height), dtype=np.float32) * np.inf
v1[:2], v2[:2], v3[:2] = np.floor(v1[:2]), np.floor(v2[:2]), np.floor(v3[:2])
if v1[1] == v2[1] or v1[1] == v3[1] or v2[0] == v3[0]:
return object_map
slope1 = -(v2[0] - v1[0]) / np.floor(v2[1] - v1[1])
slope2 = -(v3[0] - v1[0]) / np.floor(v3[1] - v1[1])
if v2[0] > v3[0]:
delta_t_x = v2 - v3
else:
delta_t_x = v3 - v2
delta_t_x = delta_t_x[2] / delta_t_x[0]
delta_t_y = v2 - v1
delta_t_y[2] = delta_t_y[2] - delta_t_y[0] * delta_t_x
delta_t_y = -delta_t_y[2] / delta_t_y[1]
curX1 = v1[0]
curX2 = v1[0]
for i in range(int(v1[1]), int(v2[1]) - 1, -1):
curX1_int, curX2_int = int(curX1), int(curX2)
temp = (
v1[2]
+ np.arange(
max(min(curX1_int, curX2_int), 0) - int(v1[0]),
min(max(curX1_int, curX2_int), height - 2) - int(v1[0]) + 1,
)
* delta_t_x
+ (int(v1[1]) - i) * delta_t_y
)
object_map[
max(min(curX1_int, curX2_int), 0) : min(
max(curX1_int, curX2_int) + 1, height - 1
),
i,
] = np.where(
temp < 0, np.inf, temp
) # make sure only vertices infront of the canvas are visible
curX1 += slope1
curX2 += slope2
return object_map
@njit
def rasterizeTopFlatTriangle(v1, v2, v3, width, height):
"""assumes v1.y = v2.y < v3.y"""
object_map = np.ones((width, height), dtype=np.float32) * np.inf
v1[:2], v2[:2], v3[:2] = np.floor(v1[:2]), np.floor(v2[:2]), np.floor(v3[:2])
if v3[1] == v1[1] or v3[1] == v2[1] or v1[0] == v2[0]:
return object_map
slope1 = (v3[0] - v1[0]) / np.floor(v3[1] - v1[1])
slope2 = (v3[0] - v2[0]) / np.floor(v3[1] - v2[1])
minX, maxX = min(v1[0], v2[0]), max(v1[0], v2[0])
if v1[0] > v2[0]:
delta_t_x = v1 - v2
else:
delta_t_x = v2 - v1
delta_t_x = delta_t_x[2] / delta_t_x[0]
delta_t_y = v3 - v2
delta_t_y[2] = delta_t_y[2] - delta_t_y[0] * delta_t_x
delta_t_y = -delta_t_y[2] / delta_t_y[1]
curX1 = v3[0]
curX2 = v3[0]
for i in range(int(v3[1]), int(v1[1]) + 1):
curX1_int, curX2_int = int(curX1), int(curX2)
temp = (
v3[2]
+ np.arange(
max(min(curX1_int, curX2_int), 0) - int(v3[0]),
min(max(curX1_int, curX2_int), height - 2) - int(v3[0]) + 1,
)
* delta_t_x
+ (int(v3[1]) - i) * delta_t_y
)
object_map[
max(min(curX1_int, curX2_int), 0) : min(
max(curX1_int, curX2_int), height - 2
)
+ 1,
i,
] = np.where(
temp < 0, np.inf, temp
) # make sure only vertices infront of the canvas are visible
curX1 += slope1
curX2 += slope2
return object_map
@njit
def rasterizeTriangle(triangle_x_y_t, width, height):
triangle_x_y_t = sorted(triangle_x_y_t, key=lambda x: x[1])
v1, v2, v3 = triangle_x_y_t[0], triangle_x_y_t[1], triangle_x_y_t[2]
if int(v1[1]) == int(v2[1]) and int(v2[1]) == int(v3[1]):
return np.ones((width, height)) * np.inf
# check for trivial case of bottom-flat triangle
elif v1[1] == v2[1]:
return rasterizeBottomFlatTriangle(v3, v1, v2, width, height)
# check for trivial case of top-flat triangle
elif v2[1] == v3[1]:
return rasterizeTopFlatTriangle(v2, v3, v1, width, height)
# need to create artifical vertex to get a bottom-flat and top-flat triangle
else:
v4 = np.array(
[(v1[0] + ((v2[1] - v1[1]) / (v3[1] - v1[1]) * (v3[0] - v1[0]))), v2[1]]
)
# we need to compute t for v4
matrix_v4 = np.array(
[[v2[0] - v1[0], v3[0] - v1[0]], [v2[1] - v1[1], v3[1] - v1[1]]]
)
vector_v4 = v4 - v1[:2]
x_y = np.dot(np.linalg.inv(matrix_v4), vector_v4)
v4 = np.array(
[
v4[0],
v4[1],
v1[2] + x_y[0] * (v2[2] - v1[2]) + x_y[1] * (v3[2] - v1[2]),
]
)
return np.minimum(
rasterizeTopFlatTriangle(v2, v4, v1, width, height),
rasterizeBottomFlatTriangle(v3, v2, v4, width, height),
)
@njit(parallel=True)
def rasterizeTrianglesHelp(tri_x_y_t, width, height):
object_map = np.empty((len(tri_x_y_t) + 1, width, height), dtype=np.float32)
for i in prange(len(tri_x_y_t)):
object_map[i] = rasterizeTriangle(tri_x_y_t[i], width, height)
return object_map
class Scene:
def __init__(
self,
screen_width: int,
screen_height: int,
objects_to_render: List[o3.Renderable],
canvas_distance: float,
bg_color,
) -> None:
# save general parameters
self.width = screen_width
self.height = screen_height
self.canvas_distance = canvas_distance
self.bg_color = np.array(bg_color)
# save render parameters
self.switch = True
self.last_quadrant = None
# set up internal structures for objects to render
# for every object we save which triangles belong to it, in order to later modify objects easily
self.objects = {i: [] for i, elem in enumerate(objects_to_render)}
self.triangle_origins = []
self.triangle_vec1s = []
self.triangle_vec2s = []
self.triangle_fill_colors = []
index_tri = 0
for i, object in enumerate(objects_to_render):
for triangle in object.get_triangles():
self.objects[i].append(index_tri)
self.triangle_origins.append(triangle.origin)
self.triangle_vec1s.append(triangle.vec1)
self.triangle_vec2s.append(triangle.vec2)
self.triangle_fill_colors.append(triangle.fill_color)
index_tri += 1
self.triangle_origins = np.array(self.triangle_origins)
self.triangle_vec1s = np.array(self.triangle_vec1s)
self.triangle_vec2s = np.array(self.triangle_vec2s)
def _getCanvasVecs(
self, pov: np.ndarray, look_point: np.ndarray
) -> Tuple[np.ndarray]:
# create a plane that is perpendicular to the view vector and use it as canvas
u = look_point - pov
cur_quadrant = getQuadrant(u[0], u[1])
if cur_quadrant != -1:
if (
self.last_quadrant != None
and max(cur_quadrant, self.last_quadrant)
- min(cur_quadrant, self.last_quadrant)
== 2
):
self.switch = not self.switch
self.last_quadrant = cur_quadrant
if self.switch:
v = np.array([u[1], -u[0], 0])
else:
v = np.array([-u[1], u[0], 0])
w = np.cross(u, v)
v, w = v / np.linalg.norm(v), w / np.linalg.norm(w)
origin = pov + self.canvas_distance * (u / np.linalg.norm(u))
# move origin to top left of canvas
origin = origin - v * self.width / 2 - w * self.height / 2
return origin, v, w
# something wrong with this, not sure what (don't really wanna spend time fixing it though cause it's too slow anyway)
def _renderRaycast(
self,
pov: np.ndarray,
canvas_origin: np.ndarray,
canvas_vecX: np.ndarray,
canvas_vecY: np.ndarray,
):
# initialize color map
rgb_map = np.empty((self.width, self.height, 3), dtype=np.int16)
t = np.arange(self.height) - self.height / 2
canvas_vec2 = | np.repeat(canvas_vecY[np.newaxis, :], self.height, axis=0) | numpy.repeat |
# pylint: disable=invalid-name
"""Tests for layers.multi_css"""
import numpy as np
import tensorflow as tf
import deepr
def test_layers_multi_css():
"""Compare MultiLogLikelihoodCSS with NumPy implementation."""
batch_size = 2
num_positives = 4
num_negatives = 8
vocab_size = 10
log_likelihood = deepr.layers.MultiLogLikelihoodCSS(
inputs=("positive_logits", "negative_logits", "positive_mask", "negative_mask"),
outputs="log_likelihood",
vocab_size=vocab_size,
)
| np.random.seed(2020) | numpy.random.seed |
from p5 import circle, stroke, fill
import numpy as np
class Boid(object):
def __init__(self,width,height,position,horizon, max_speed, rule1W, rule2W, rule3W
,desired_seperation):
# Width, height = Screen Output Dimensions
# x,y = boids positions
# horizon = It describes how far boid can detect the others
# max_speed = Max speed of each individual in the group
# Rule1 = Cohesion , Rule2 = Seperation, Rule3= Alignment
# rule1W = Weight for the rule1 (as a percentage), i.e. rule1W = 5 --> 5%
# desired_seperation = Minimum distance between each boid
self.width = width
self.height = height
self.position = position
self.max_speed = max_speed
initial_random_velocity = (np.random.rand(2)-0.5) * self.max_speed * 2
self.velocity = initial_random_velocity
self.horizon = horizon
self.rule1W = rule1W
self.rule2W = rule2W
self.rule3W = rule3W
self.desired_seperation = desired_seperation
def show_boid(self):
stroke(255) #white contour colors
fill(0,0,255) #fill with blue
circle( (self.position[0],self.position[1]) ,radius=10)
def update_boid(self):
# Limiting the speed
if np.linalg.norm(self.velocity) > self.max_speed:
self.velocity = (self.velocity/np.linalg.norm(self.velocity)) * self.max_speed
# Then update the position
self.position = np.add(self.position, self.velocity)
def bound_position(self):
# If boids reach the edges, it should come back from other side
if self.position[0] > self.width-1:
self.position[0] = 0
elif self.position[1] > self.height-1:
self.position[1] = 0
elif self.position[0] < 0:
self.position[0] = self.width-1
elif self.position[1] < 0:
self.position[1] = self.height-1
def main_boid(self, boids):
v1 = self.rule1(boids)
v2 = self.rule2(boids)
v3 = self.rule3(boids)
self.bound_position()
self.show_boid()
self.velocity += v1 + v2 + v3
self.update_boid()
# This function is used to move flock to a desired position
# desired_position = Desired target position to move boids
# step_size = determines how much boids will move towards the desired position
# in each iteration as a percent --> step_size = 1 means 1% at each step
def tend_to_place(self,desired_position,step_size):
self.velocity = (desired_position - self.position) * (step_size / 100)
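# Usage sketch (target position and step size are illustrative): nudge the boid
# one percent of the way towards the centre of an 800x600 window on each frame.
#   boid.tend_to_place(np.array([400.0, 300.0]), step_size=1)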
def rule1(self,boids): #Cohesion
center_of_mass = np.zeros(2)
N = 0 #Total boid number
for b in boids:
# self is the boid we are currently looking for. We don't want to take its position
# into account for center of mass that's why we have the expression right of &
if (np.linalg.norm(b.position - self.position) < self.horizon) & (b != self):
center_of_mass += b.position
N += 1
center_of_mass = center_of_mass / max(N, 1)  # mean over the N detected neighbours (self excluded)
target_position = (center_of_mass * self.rule1W) / 100
return target_position
def rule2(self,boids): #Seperation
c = np.zeros(2)
for b in boids:
if ( (np.linalg.norm(b.position - self.position) < self.horizon)
& ( | np.linalg.norm(b.position - self.position) | numpy.linalg.norm |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.normal import Normal
torch.set_default_tensor_type(torch.cuda.FloatTensor)
from typing import Tuple
import math
import numpy as np
import matplotlib.pyplot as plt
import gzip
import itertools
from scipy.spatial import KDTree
import time
from multiprocessing.dummy import Pool as ThreadPool
import multiprocessing
# import copy
device = torch.device('cuda')
class StandardScaler(object):
def __init__(self):
pass
def fit(self, data):
self.mu = np.mean(data, axis=0, keepdims=True)
self.std = np.std(data, axis=0, keepdims=True)
self.std[self.std < 1e-12] = 1.0
self.mu_tensor = torch.from_numpy(self.mu).float().to('cuda')
self.std_tensor = torch.from_numpy(self.std).float().to('cuda')
def transform(self, data):
return (data - self.mu) / self.std
def inverse_transform(self, data):
return self.std * data + self.mu
def transform_tensor(self, data):
return (data - self.mu_tensor) / self.std_tensor
class Swish(nn.Module):
def __init__(self):
super(Swish, self).__init__()
def forward(self, x):
x = x * F.sigmoid(x)
return x
def init_weights(m):
def truncated_normal_init(t, mean=0.0, std=0.01):
torch.nn.init.normal_(t, mean=mean, std=std)
while True:
cond = torch.logical_or(t < mean - 2 * std, t > mean + 2 * std)
if not torch.sum(cond):
break
t = torch.where(cond, torch.nn.init.normal_(torch.ones(t.shape), mean=mean, std=std), t)
return t
if type(m) == nn.Linear or isinstance(m, EnsembleFC):
input_dim = m.in_features
truncated_normal_init(m.weight, std=1 / (2 * np.sqrt(input_dim)))
m.bias.data.fill_(0.0)
class EnsembleFC(nn.Module):
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
ensemble_size: int
weight: torch.Tensor
def __init__(self, in_features: int, out_features: int, ensemble_size: int, weight_decay: float = 0., bias: bool = True) -> None:
super(EnsembleFC, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.ensemble_size = ensemble_size
self.weight = nn.Parameter(torch.Tensor(ensemble_size, in_features, out_features))
self.weight_decay = weight_decay
if bias:
self.bias = nn.Parameter(torch.Tensor(ensemble_size, out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) -> None:
pass
def forward(self, input: torch.Tensor) -> torch.Tensor:
w_times_x = torch.bmm(input, self.weight)
return torch.add(w_times_x, self.bias[:, None, :])
def extra_repr(self) -> str:
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features, self.out_features, self.bias is not None
)
class EnsembleModel(nn.Module):
# ensemble nn
def __init__(self, state_size, action_size, reward_size, ensemble_size, hidden_size=200, learning_rate=1e-3, use_decay=False):
super(EnsembleModel, self).__init__()
self.hidden_size = hidden_size
self.nn1 = EnsembleFC(state_size + action_size, hidden_size, ensemble_size, weight_decay=0.000025)
self.nn2 = EnsembleFC(hidden_size, hidden_size, ensemble_size, weight_decay=0.00005)
self.nn3 = EnsembleFC(hidden_size, hidden_size, ensemble_size, weight_decay=0.000075)
self.nn4 = EnsembleFC(hidden_size, hidden_size, ensemble_size, weight_decay=0.000075)
self.use_decay = use_decay
self.output_dim = state_size + reward_size
self.reward_size = reward_size
self.nn5 = EnsembleFC(hidden_size, self.output_dim * 2, ensemble_size, weight_decay=0.0001)
self.max_logvar = nn.Parameter((torch.ones((1, self.output_dim)).float() / 2).to(device), requires_grad=False)
self.min_logvar = nn.Parameter((-torch.ones((1, self.output_dim)).float() * 10).to(device), requires_grad=False)
self.optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate)
self.apply(init_weights)
self.swish = Swish()
def forward(self, x, mode='rs', ret_log_var=False):
nn1_output = self.swish(self.nn1(x))
nn2_output = self.swish(self.nn2(nn1_output))
nn3_output = self.swish(self.nn3(nn2_output))
nn4_output = self.swish(self.nn4(nn3_output))
nn5_output = self.nn5(nn4_output)
mean = nn5_output[:, :, :self.output_dim]
logvar = self.max_logvar - F.softplus(self.max_logvar - nn5_output[:, :, self.output_dim:])
logvar = self.min_logvar + F.softplus(logvar - self.min_logvar)
if mode=='rs':
if ret_log_var:
return mean, logvar
else:
return mean, torch.exp(logvar)
elif mode=='s':
if ret_log_var:
return mean[:, :, self.reward_size:], logvar[:, :, self.reward_size:]
else:
return mean[:, :, self.reward_size:], torch.exp(logvar[:, :, self.reward_size:])
elif mode=='r':
if ret_log_var:
return mean[:, :, :self.reward_size], logvar[:, :, :self.reward_size]
else:
return mean[:, :, :self.reward_size], torch.exp(logvar[:, :, :self.reward_size])
def get_decay_loss(self):
decay_loss = 0.
for m in self.children():
if isinstance(m, EnsembleFC):
decay_loss += m.weight_decay * torch.sum(torch.square(m.weight)) / 2.
return decay_loss
def loss(self, mean, logvar, labels, inc_var_loss=True):
"""
mean, logvar: Ensemble_size x N x dim
labels: N x dim
"""
assert len(mean.shape) == len(logvar.shape) == len(labels.shape) == 3
inv_var = torch.exp(-logvar)
if inc_var_loss:
mse_loss = torch.mean(torch.mean(torch.pow(mean - labels, 2) * inv_var, dim=-1), dim=-1)
var_loss = torch.mean(torch.mean(logvar, dim=-1), dim=-1)
total_loss = torch.sum(mse_loss) + torch.sum(var_loss)
else:
mse_loss = torch.mean(torch.pow(mean - labels, 2), dim=(1, 2))
total_loss = torch.sum(mse_loss)
return total_loss, mse_loss
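# The loss above is the usual Gaussian negative log-likelihood per output
# dimension (up to additive constants and a factor of 1/2):
# (mean - label)^2 / var + log(var), averaged over samples and dimensions and
# summed over ensemble members. Commented illustration for one member/dimension:
#   nll = (mean - label) ** 2 * torch.exp(-logvar) + logvar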
def train(self, loss, loss_regular,weight_grad_loss=1000):
gamma = weight_grad_loss
self.optimizer.zero_grad()
loss += 0.01 * torch.sum(self.max_logvar) - 0.01 * torch.sum(self.min_logvar)
loss += gamma * loss_regular
if self.use_decay:
loss += self.get_decay_loss()
loss.backward()
self.optimizer.step()
class EnsembleDynamicsModel():
def __init__(self, network_size, elite_size, state_size, action_size, reward_size=1, hidden_size=200, use_decay=False):
self.network_size = network_size
self.elite_size = elite_size
self.model_list = []
self.state_size = state_size
self.action_size = action_size
self.reward_size = reward_size
self.network_size = network_size
self.elite_model_idxes = []
self.elite_model_idxes_reward = []
self.ensemble_model = EnsembleModel(state_size, action_size, reward_size, network_size, hidden_size, use_decay=use_decay)
self.scaler = StandardScaler()
self.state_size = state_size
self.action_size = action_size
self.tree = None
def function_grad(self, x):
x = x.view(self.network_size, -1, self.state_size+self.action_size)
state = x[:,:,:self.state_size]
x = self.scaler.transform_tensor(x)
y, _ = self.ensemble_model(x, mode='rs', ret_log_var=True)
y[:,:,self.reward_size:] += state
return y.view(-1, self.state_size+self.reward_size, self.state_size+self.reward_size)
def train(self, inputs, labels, state_regular, action_regular, next_state_regular, reward_regular, batch_size=256, weight_grad_loss=10, holdout_ratio=0., max_epochs_since_update=5, near_n=5):
self._max_epochs_since_update = max_epochs_since_update
self._epochs_since_update = 0
self._state = {}
self._snapshots = {i: (None, 1e10) for i in range(self.network_size)}
num_holdout = int(inputs.shape[0] * holdout_ratio)
permutation = np.random.permutation(inputs.shape[0])
inputs, labels = inputs[permutation], labels[permutation]
train_inputs, train_labels = inputs[num_holdout:], labels[num_holdout:]
holdout_inputs, holdout_labels = inputs[:num_holdout], labels[:num_holdout]
inputs_regular = np.concatenate((state_regular, action_regular), axis=-1)
labels_regular = np.concatenate((reward_regular.reshape([len(reward_regular),1]), next_state_regular), axis=-1)
num_holdout_regular = int(inputs_regular.shape[0] * holdout_ratio)*0
permutation_regular = | np.random.permutation(inputs_regular.shape[0]) | numpy.random.permutation |
import numpy as np
from UQpy.utilities.distances.baseclass.GrassmannianDistance import (
GrassmannianDistance,
)
from UQpy.utilities.GrassmannPoint import GrassmannPoint
class FubiniStudyDistance(GrassmannianDistance):
"""
A class to calculate the Fubini-Study distance between two Grassmann points.
"""
def compute_distance(self, xi: GrassmannPoint, xj: GrassmannPoint) -> float:
"""
Compute the Fubini-Study distance between two points on the Grassmann manifold.
:param xi: Orthonormal matrix representing the first point.
:param xj: Orthonormal matrix representing the second point.
"""
GrassmannianDistance.check_rows(xi, xj)
r = np.dot(xi.data.T, xj.data)
(ui, si, vi) = np.linalg.svd(r, full_matrices=True)
index = | np.where(si > 1) | numpy.where |
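# Typical continuation (sketch): clip singular values at 1 to guard against
# round-off, then take the arc-cosine of their product as the Fubini-Study
# distance, d = arccos(prod_i cos(theta_i)) with cos(theta_i) = s_i.
#   si[index] = 1.0
#   distance = np.arccos(np.prod(si))
#   return distance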
import ast
import logging
from .environment import Robot
import numpy as np
import itertools
import matplotlib
import matplotlib.style
import pandas as pd
import sys
from collections import defaultdict
from . import plotting_r as plotting
import json
matplotlib.style.use('ggplot')
SPEED = 0.7
logging.basicConfig(filename='reinforcement-learning.log', filemode='w', level=logging.DEBUG)
def createEpsilonGreedyPolicy(Q, epsilon, num_actions):
"""
Creates an epsilon-greedy policy based
on a given Q-function and epsilon.
Returns a function that takes the state
as an input and returns the probabilities
for each action in the form of a numpy array
of length of the action space(set of possible actions).
"""
def policyFunction(state):
action_probabilities = np.ones(num_actions,dtype=float) * epsilon / num_actions
best_action = np.argmax(Q[state])
action_probabilities[best_action] += (1.0 - epsilon)
return action_probabilities
return policyFunction
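# Example (hedged): how the returned policy might be sampled. The Q table,
# state key and 3-action space here are illustrative and not part of the
# original module.
#
#   Q = defaultdict(lambda: np.zeros(3))
#   policy = createEpsilonGreedyPolicy(Q, epsilon=0.1, num_actions=3)
#   probs = policy(('x', 'y'))   # length-3 probability vector summing to 1
#   action = np.random.choice(len(probs), p=probs)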
def qLearning(env, num_episodes, discount_factor=1, alpha=0.01, epsilon=0.1):
"""
Q-Learning algorithm: Off-policy TD control.
Finds the optimal greedy policy while improving
following an epsilon-greedy policy
"""
# Action value function
# A nested dictionary that maps
# state -> (action -> action-value).
# import pdb; pdb.set_trace()
Q = defaultdict(lambda: np.zeros(3))
"""
Grid applications
-----------------
Functions to remap data given source and target grids
Some utilities use python tool xESMF.
Author: <NAME> (contributions from <NAME>)
Date: Jan 2019
"""
import numpy as np
import logging
def rotated_grid_transform(lons, lats, pole_lon, pole_lat, rot2reg=True):
# If lon/lat is 1D; create 2D meshgrid
lon, lat = np.meshgrid(lons, lats)\
if lats.ndim == 1 else (lons, lats)
lon = (lon*np.pi)/180 # Convert degrees to radians
lat = (lat*np.pi)/180
theta = 90 - pole_lat # Rotation around y-axis
phi = pole_lon + 180 # Rotation around z-axis
# Convert degrees to radians
theta = (theta*np.pi)/180
phi = (phi*np.pi)/180
# Convert from spherical to cartesian coordinates
x = np.cos(lon)*np.cos(lat)
y = np.sin(lon)*np.cos(lat)
z = np.sin(lat)
if rot2reg: # Rotated -> Regular
phi = -phi
theta = -theta
x_new = np.cos(theta)*np.cos(phi)*x + np.sin(phi)*y +\
np.sin(theta)*np.cos(phi)*z
y_new = -np.cos(theta)*np.sin(phi)*x + np.cos(phi)*y -\
np.sin(theta)*np.sin(phi)*z
z_new = -np.sin(theta)*x + np.cos(theta)*z
else: # Regular -> Rotated
x_new = np.cos(theta)*np.cos(phi)*x + np.cos(theta)*np.sin(phi)*y +\
np.sin(theta)*z
y_new = -np.sin(phi)*x + np.cos(phi)*y
z_new = -np.sin(theta)*np.cos(phi)*x - np.sin(theta)*np.sin(phi)*y +\
np.cos(theta)*z
# Convert cartesian back to spherical coordinates
lon_trans = np.arctan2(y_new, x_new)
lat_trans = np.arcsin(z_new)
# Convert radians back to degrees
lon_trans = (lon_trans*180)/np.pi
lat_trans = (lat_trans*180)/np.pi
return lon_trans, lat_trans
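# Example (hedged): transforming a small rotated-pole grid back to regular
# lon/lat. The pole position is illustrative only.
#
#   rlon = np.arange(-10.0, 10.0, 0.5)
#   rlat = np.arange(-5.0, 5.0, 0.5)
#   lon_reg, lat_reg = rotated_grid_transform(rlon, rlat, pole_lon=-162.0,
#                                             pole_lat=39.25, rot2reg=True)
#   # lon_reg/lat_reg are 2D arrays of shape (len(rlat), len(rlon)) in degrees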
def fnCellCorners(rgrLon, rgrLat):
"""
File name: fnCellBoundaries
Author: <NAME>
E-mail: <EMAIL>
Date created: 20.03.2015
Date last modified: 20.03.2015
Estimate the cell boundaries from the cell location of regular grids
returns: rgrLonBND & rgrLatBND --> arrays of dimension [nlon,nlat]
containing the cell boundaries of each gridcell in rgrlon and rgrlat
"""
# from ipdb import set_trace as stop
logging.debug('fnCellCorners')
rgLonSize = np.array(rgrLon).shape
rgLatSize = np.array(rgrLat).shape
if len(rgLonSize) == 1:
rgrLat = np.broadcast_to(rgrLat, (rgLonSize[0],
rgLatSize[0])).swapaxes(0, 1)
rgrLon = np.broadcast_to(rgrLon, (rgLatSize[0], rgLonSize[0]))
rgiSize = np.array(rgrLon).shape
rgrLonBND = np.empty((rgiSize[0]+1, rgiSize[1]+1,))
rgrLonBND[:] = np.NAN
rgrLatBND = np.empty((rgiSize[0]+1, rgiSize[1]+1,))
rgrLatBND[:] = np.NAN
for lo in range(rgiSize[0]+1):
for la in range(rgiSize[1]+1):
if lo < rgiSize[0]-1 and la < rgiSize[1]-1:
# All points except at the boundaries
rgrLonBND[lo, la] = rgrLon[lo, la] -\
(rgrLon[lo+1, la+1]-rgrLon[lo, la])/2
rgrLatBND[lo, la] = rgrLat[lo, la] -\
(rgrLat[lo+1, la+1]-rgrLat[lo, la])/2
elif lo >= rgiSize[0]-1 and la < rgiSize[1]-1:
# right boundary, second to last row
rgrLonBND[lo, la] = rgrLon[lo-1, la] +\
(rgrLon[lo-1, la]-rgrLon[lo-2, la+1])/2
rgrLatBND[lo, la] = rgrLat[lo-1, la] -\
(rgrLat[lo-2, la+1]-rgrLat[lo-1, la])/2
elif lo < rgiSize[0]-1 and la >= rgiSize[1]-1:
# upper boundary second last row
rgrLonBND[lo, la] = rgrLon[lo, la-1] -\
(rgrLon[lo+1, la-2]-rgrLon[lo, la-1])/2
rgrLatBND[lo, la] = rgrLat[lo, la-1] -\
(rgrLat[lo+1, la-2]-rgrLat[lo, la-1])/2
elif lo >= rgiSize[0]-1 and la >= rgiSize[1]-1:
# upper right grid cells
rgrLonBND[lo, la] = rgrLon[lo-1, la-1] -\
(rgrLon[lo-2, la-2]-rgrLon[lo-1, la-1])/2
rgrLatBND[lo, la] = rgrLat[lo-1, la-1] -\
(rgrLat[lo-2, la-2]-rgrLat[lo-1, la-1])/2
if len(rgLonSize) == 1:
rgrLonBND = rgrLonBND[0, :]
rgrLatBND = rgrLatBND[:, 0]
return(rgrLonBND, rgrLatBND)
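# Example (hedged): corner estimation for a small synthetic regular grid.
#
#   lons = np.linspace(-10.0, 10.0, 21)   # 21 cell centres in longitude
#   lats = np.linspace(40.0, 50.0, 11)    # 11 cell centres in latitude
#   lon_bnd, lat_bnd = fnCellCorners(lons, lats)
#   # For 1D input the outputs are 1D: lon_bnd has 22 values, lat_bnd has 12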
def calc_vertices(lons, lats, write_to_file=False, filename=None):
"""
Estimate the cell boundaries from the cell location of regular grids
Parameters
----------
lons, lats: arrays
Longitude and latitude values
write_to_file: bool
If True lat/lon information, including vertices, is written to file
following the structure given by the cdo command 'griddes'
filename: str
Name of text file for the grid information. Only used if write_to_file
is True. If not provided, a default name will be used.
Returns
-------
lon_bnds, lat_bnds: arrays
Arrays of dimension [4, nlat, nlon] containing cell boundaries of each
gridcell in lons and lats
"""
# Dimensions lats/lons
nlon = lons.shape[1]
nlat = lats.shape[0]
# Rearrange lat/lons
lons_row = lons.flatten()
lats_row = lats.flatten()
# Allocate lat/lon corners
lons_cor = np.zeros((lons_row.size*4))
lats_cor = np.zeros((lats_row.size*4))
lons_crnr = np.empty((lons.shape[0]+1, lons.shape[1]+1))
lons_crnr[:] = np.nan
lats_crnr = np.empty((lats.shape[0]+1, lats.shape[1]+1))
lats_crnr[:] = np.nan
# -------- Calculating corners --------- #
# Loop through all grid points except at the boundaries
for lat in range(1, lons.shape[0]):
for lon in range(1, lons.shape[1]):
# SW corner for each lat/lon index is calculated
lons_crnr[lat, lon] = (lons[lat-1, lon-1] + lons[lat, lon-1] +
lons[lat-1, lon] + lons[lat, lon])/4.
lats_crnr[lat, lon] = (lats[lat-1, lon-1] + lats[lat, lon-1] +
lats[lat-1, lon] + lats[lat, lon])/4.
# Grid points at boundaries
lons_crnr[0, :] = lons_crnr[1, :] - (lons_crnr[2, :] - lons_crnr[1, :])
lons_crnr[-1, :] = lons_crnr[-2, :] + (lons_crnr[-2, :] - lons_crnr[-3, :])
lons_crnr[:, 0] = lons_crnr[:, 1] + (lons_crnr[:, 1] - lons_crnr[:, 2])
lons_crnr[:, -1] = lons_crnr[:, -2] + (lons_crnr[:, -2] - lons_crnr[:, -3])
lats_crnr[0, :] = lats_crnr[1, :] - (lats_crnr[2, :] - lats_crnr[1, :])
lats_crnr[-1, :] = lats_crnr[-2, :] + (lats_crnr[-2, :] - lats_crnr[-3, :])
lats_crnr[:, 0] = lats_crnr[:, 1] - (lats_crnr[:, 1] - lats_crnr[:, 2])
lats_crnr[:, -1] = lats_crnr[:, -2] + (lats_crnr[:, -2] - lats_crnr[:, -3])
# ------------ DONE ------------- #
# Fill in counterclockwise and rearrange
count = 0
for lat in range(lons.shape[0]):
for lon in range(lons.shape[1]):
lons_cor[count] = lons_crnr[lat, lon]
lons_cor[count+1] = lons_crnr[lat, lon+1]
lons_cor[count+2] = lons_crnr[lat+1, lon+1]
lons_cor[count+3] = lons_crnr[lat+1, lon]
lats_cor[count] = lats_crnr[lat, lon]
lats_cor[count+1] = lats_crnr[lat, lon+1]
lats_cor[count+2] = lats_crnr[lat+1, lon+1]
lats_cor[count+3] = lats_crnr[lat+1, lon]
count += 4
lons_bnds = lons_cor.reshape(nlat, nlon, 4)
lats_bnds = lats_cor.reshape(nlat, nlon, 4)
if write_to_file:
_write_grid_info(lons_row, lons_cor, lats_row, lats_cor,
nlon, nlat, filename=filename)
return lons_bnds, lats_bnds
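# Example (hedged): vertices for a small 2D curvilinear grid, without writing
# the griddes file to disk.
#
#   lon2d, lat2d = np.meshgrid(np.linspace(0., 10., 11), np.linspace(50., 55., 6))
#   lon_bnds, lat_bnds = calc_vertices(lon2d, lat2d, write_to_file=False)
#   # lon_bnds.shape == (6, 11, 4): four counterclockwise corners per cell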
def _write_grid_info(lons_row, lons_cor, lats_row, lats_cor, nlon, nlat,
filename):
"""
Write grid info to file
"""
print("Writing grid info to disk ...")
if filename is None:
from datetime import datetime
dtime = datetime.now().strftime('%Y-%m-%dT%H%M%S')
fname = './grid_{}x{}_latlon_bounds_{}'.format(nlon, nlat, dtime)
else:
fname = filename
lt_row = np.array_split(lats_row, np.ceil(lats_row.size/6).astype(np.int))
lt_row_str = "\n".join([" ".join(str(item) for item in arr)
for arr in lt_row])
lt_cor = np.array_split(lats_cor, np.ceil(lats_cor.size/6).astype(np.int))
lt_cor_str = "\n".join([" ".join(str(item) for item in arr)
for arr in lt_cor])
ln_row = np.array_split(lons_row, np.ceil(lons_row.size/6).astype(np.int))
ln_row_str = "\n".join([" ".join(str(item) for item in arr)
for arr in ln_row])
ln_cor = np.array_split(lons_cor, np.ceil(lons_cor.size/6).astype(np.int))
ln_cor_str = "\n".join([" ".join(str(item) for item in arr)
for arr in ln_cor])
grid_txt = ("#\n# gridID 0\n#\ngridtype = curvilinear\ngridsize = {}\n"
"xname = lon\nxlongname = longitude\nxunits = "
"degrees_east\nyname = lat\nylongname = latitude\nyunits"
" = degrees_north\nxsize = {}\nysize = {}\nxvals "
" =\n{}\nxbounds =\n{}\nyvals =\n{}\nybounds "
"=\n{}".format(nlon*nlat, nlon, nlat, ln_row_str, ln_cor_str,
lt_row_str, lt_cor_str))
# Write to file
with open(fname, 'w') as outfile:
outfile.write(grid_txt)
def fnRemapConOperator(rgrLonS, rgrLatS, rgrLonT, rgrLatT, rgrLonSBNDS=None,
rgrLatSBNDS=None, rgrLonTBNDS=None, rgrLatTBNDS=None):
"""
File name: fnRemapConOperator
Author: <NAME>
E-mail: <EMAIL>
Date created: 26.05.2017
Date last modified: 26.05.2017
Generates an operator to conservatively remap data from a source
rectangular grid to a target rectangular grid.
Parameters
----------
rgrLonS,rgrLatS: arrays
Source grid longitude and latitude values
rgrLonT,rgrLatT: arrays
Target grid longitude and latitude values
rgrLonSBNDS,rgrLatSBNDS: arrays
Source grid longitude and latitude grid point boundaries (corners).
These must be given in the structure (lat, lon, vertices) where
vertices are the four corners of each grid point. If not provided
(default) then corners are calculated using fnCellCorners.
rgrLonTBNDS,rgrLatTBNDS: arrays
Target grid longitude and latitude grid point boundaries (corners).
See above for more info.
Returns
-------
grConRemapOp: dictionary
operator that contains the grid cells and their weights of the
source grid for each target grid cell
"""
from shapely.geometry import Polygon
logging.debug('fnRemapConOperator')
# check if the grids are given in 2D
if len(rgrLonS.shape) == 1:
rgrLonS1 = np.asarray(([rgrLonS, ]*rgrLatS.shape[0]))
rgrLatS = np.asarray(([rgrLatS, ]*rgrLonS.shape[0])).transpose()
rgrLonS = rgrLonS1
if len(rgrLonT.shape) == 1:
rgrLonT1 = np.asarray(([rgrLonT, ]*rgrLatT.shape[0]))
rgrLatT = np.asarray(([rgrLatT, ]*rgrLonT.shape[0])).transpose()
rgrLonT = rgrLonT1
# All lon grids have to go from -180 to +180 --> convert now!'
if np.min(rgrLonS) > 180:
rgi180 = np.where(rgrLonS > 180)
rgrLonS[rgi180] = rgrLonS[rgi180] - 360.
if np.min(rgrLonT) > 180:
rgi180 = np.where(rgrLonT > 180)
rgrLonT[rgi180] = rgrLonT[rgi180] - 360.
if rgrLonSBNDS is None:
# get boundary estimates for the grid cells since only the center points
# are given
rgrLonSB, rgrLatSB = fnCellCorners(rgrLonS, rgrLatS)
else:
rgrLonSB = rgrLonSBNDS
rgrLatSB = rgrLatSBNDS
# All lon grids have to go from -180 to +180 --> convert now!'
if np.min(rgrLonSB) > 180:
rgi180 = np.where(rgrLonSB > 180)
rgrLonSB[rgi180] = rgrLonSB[rgi180] - 360.
if rgrLonTBNDS is None:
rgrLonTB, rgrLatTB = fnCellCorners(rgrLonT, rgrLatT)
else:
rgrLonTB = rgrLonTBNDS
rgrLatTB = rgrLatTBNDS
if np.min(rgrLonTB) > 180:
rgi180 = np.where(rgrLonTB > 180)
# # # #
# PYTHON VERSION OF MATT's DOF/DOT/LOGS
# # # #
def tfg_days( x, err='off' ):
''' calculate DOF/DOT/LOGS for a vector of 12 chronological monthly values '''
import itertools
import numpy as np
# filter the div by zero and comparison with np.nan warnings from numpy
if err == 'off':
np.warnings.filterwarnings( "ignore", category=RuntimeWarning )
x[ x == 0 ] = -0.0001 # need to treat zero as freezing (working with signs)
# positive or negative monthly temps
s1 = np.sign( x )
# products of consecutive months' signs: positive indicates no change; negative indicates a potential freeze or thaw transition
s = s1[:11] * s1[1:]
idx, = np.where( s < 0 )
# may be length zero (no transitions)
ind = np.sort( np.concatenate( [idx, idx+1] ) )
if np.any( np.isnan( x ) == True ): # ignore cells with missing data
dot, dof, grow = itertools.repeat( np.array([np.nan]), 3 )
case = 1
elif (len(ind) == 0) & (s1[0] > 0): # no transitions: all positive temps means no freeze day
dot = 0
dof, grow = itertools.repeat( np.array([365]), 2 )
case = 2
elif (len(ind) == 0) & (s1[0] < 0): # no transitions: all negative temps means no thaw day
dot = np.array([365])
dof, grow = itertools.repeat( np.array([0]), 2 )
case = 3
# [ML FIXED]
elif len(ind) == 2: # only one transition during the year, thawing or freezing
# places where we know the ground freezes and thaws,
# but during a specific 12 months we just don't happen to witness both
# only thaw occurs
if x[ ind[0] ] < 0:
# [ml] note:((ind[0]+1)-1) is ind[0]+1 is the month number and minus 1 is to get to previous month
# we could make that a call to a months array -- months = range(1, 12+1)
dot = 15 + 30 * ((ind[0]+1)-1) - np.round( x[ ind[0] ] / (np.diff( x[ ind[:2] ] ) / 30.0), decimals=0 )
dof = np.array([350]) # 350: we know the ground freezes so we use 350 rather than the special 365
grow = dof - dot
case = 4
# only freeze occurs
if x[ ind[0] ] > 0:
dof = 350 - 30 * (12-ind[1]-1) - np.round( x[ ind[1] ] / (np.diff( x[ ind[:2] ] ) / 30.0), decimals=0 )
dot = np.array([15]) # 15: we know the ground thaws so we use 15 rather than the special 0
grow = dof - dot
case = 5
# [ML FIXED]
elif (len(ind) == 4 ) & (s1[0] < 0): # two transitions occur: thaw, then freeze (this is the ideal case; everything else is an idiosyncratic edge case)
# [ml] note:((ind[0]+1)-1) is ind[0]+1 is the month number and minus 1 is to get to previous month
# we could make that a call to a months array -- months = range(1, 12+1)
dot = 15 + 30 * ((ind[0]+1)-1) - np.round( x[ ind[0] ] / (np.diff( x[ ind[:2] ] ) / 30.0), decimals=0 )
dof = 350 - 30 * (12-ind[3]-1) - np.round( x[ ind[3] ] / (np.diff( x[ ind[2:4] ] ) / 30.0), decimals=0 )
grow = dof - dot
case = 0
# [ML FIXED]
elif (len(ind) == 4) & (s1[0] > 0): # two transitions occur but backward to what is expected; freeze, then thaw
if( ind[0] >= 7 ): # freeze occurs in second half of year as expected; late thaw is spurious
# dof = 350 - 30 * (12-ind[1]-1) - np.round( x[ ind[1] ] / (np.diff( x[ ind[:2] ] ) / 30.0), decimals=0 )
dof = 350 - 30 * (12-ind[1]-1) - np.round( x[ ind[1] ] / (np.diff( x[ ind[:2] ] ) / 30.0), decimals=0 )
dot = np.array([15]) # ignore spurious post-freeze thaw; treat as early, unobserved thaw
grow = dof - dot
case = 6
if ind[0] <= 6: # spurious freeze occurs in first half of year; thaw probably fine
dot = 15 + 30 * ((ind[2]+1)-1) - np.round( x[ ind[2] ] / (np.diff( x[ ind[2:4] ]) / 30.0), decimals=0 )
dof = np.array([350]) # ignore spurious early freeze; treat as late, unobserved freeze
grow = dof - dot
case = 7
# [ML FIXED]
elif len(ind) > 4: # more than two transitions; at least one definitely spurious
# [MATT Q]:
# what is the prepending 0 below? and what is its intention?
# what do u do if there is a use-case where idx-0 is already chosen? Py is ZERO-anchored...
ind2, = np.where( s < 0 )
ind2 = ind2 + 1
ind2 = np.insert( ind2, 0, np.array([0]) )
# [ml] m1, m2 are month indexes
m1, = np.where( np.diff( ind2 ) == np.max( np.diff( ind2 ) ) )
m1 = m1[-1] + 1
m2, = np.where( np.delete(np.diff( ind2 ), (m1-1)-1) == max( np.delete(np.diff( ind2 ), (m1-1)-1)) )
m2, = np.where( np.delete(np.diff( ind2 ), (m1-1)) == max( np.delete(np.diff( ind2 ), (m1-1))) )
m2 = m2[-1] + 1
if m1 == m2:
m2 = m2 - 1
ind2 = ind2[ np.sort( np.append( m1, m2 ) ) ]
ind = np.sort( np.append(ind2, ind2+1) ) - 1
dot = 15 + 30 * (ind[1]-1) - np.round( x[ind[1]-1] / (np.diff( x[ ind[:2] ] ) / 30.0), 0) # [ml] SOME WEIRD -1's here...
dof = 350 - 30 * (12-ind[3]-1) - np.round( x[ind[3]] / (np.diff( x[ ind[2:4] ] ) / 30.0), 0)
grow = dof - dot
case = 8
else:
dot, dof, grow = itertools.repeat( np.array([np.nan]), 3 )
# print( "Condition unconsidered: {}".format( x.strip() ) )
return 'dof:{} dot:{} logs:{}'.format( dof,dot,grow )
if __name__ == '__main__':
import numpy as np
# test data
x_list = [ np.array([-16, -5, -1, 3, 5, 10, 12, 16, 11, -3, -15, -16]),
np.array([-16, -5, -1, 3, 5, 10, 12, 16, 11, np.nan, -15, -16]),
np.array([1, 3, 4, 6, 7, 12, 15, 12, 8, 9, 4, 2]),
np.array([-16, -15, -13, -11, -10, -5, 0, -2, -4, -12, -13, -16]),
np.array([-16, -13, -8, -6, 1, 4, 7, 11, 8, 4, 2, 1]),
np.array([1, 3, 1, 5, 8, 10, 14, 11, 7, -2, -5, -2]) ]
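# Hedged continuation: the original __main__ driver is truncated here; a
# minimal loop over the sample vectors above would look like this.
for x in x_list:
print(tfg_days(x.astype(np.float64)))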
import numpy as np
from typing import Any, List, Tuple, Iterable
from pathlib import Path
import logging
import pyn5
from .octrees import OctreeVolume
from .config import SegmentationsConfig
from .arbors import Node
logger = logging.getLogger("sarbor")
class SegmentationSource:
"""
Data structure to contain volumetric data for algorithms based on segmentations.
Contains 3 major sources of data:
1) segmentation_counts (OctreeVolume[uint8]):
How many times each voxel was selected during segmentation
2) segmentation_views (OctreeVolume[uint8]):
How many times each voxel was contained in a nodes field of view during segmentation
3) distances (OctreeVolume[float32]):
Minimum distance from each voxel to a sample point during segmentation
"""
def __init__(self, config: SegmentationsConfig):
self._sphere = None
# Octrees
self._segmentation_views = None
self._segmentation_counts = None
self._distances = None
# Config
self._config = config
@property
def resolution_phys(self) -> np.ndarray:
return self._config.resolution_phys
@property
def start_phys(self) -> np.ndarray:
return self._config.start_phys
@property
def shape_phys(self) -> np.ndarray:
return self._config.shape_phys
@property
def end_phys(self) -> np.ndarray:
return self._config.end_phys
@property
def voxel_resolution(self) -> np.ndarray:
return self._config.voxel_resolution
@property
def start_voxel(self) -> np.ndarray:
return self._config.start_voxel
@property
def shape_voxel(self) -> np.ndarray:
return self._config.shape_voxel
@property
def end_voxel(self) -> np.ndarray:
return self._config.end_voxel
@property
def seg_phys_bounds(self) -> Tuple[np.ndarray, np.ndarray]:
return self._config.seg_phys_bounds
@property
def seg_voxel_bounds(self) -> Tuple[np.ndarray, np.ndarray]:
return self._config.seg_voxel_bounds
@property
def downsample_factor(self) -> np.ndarray:
return self._config.downsample_factor
@property
def fov_shape_voxels(self) -> np.ndarray:
return self._config.fov_shape_voxels
@property
def fov_shape_phys(self) -> np.ndarray:
return self._config.fov_shape_phys
@property
def leaf_shape_voxels(self) -> np.ndarray:
return self._config.leaf_shape_voxels
@property
def sphere(self) -> np.ndarray:
"""
A 3D numpy array of shape fov_shape_voxels where each index [i,j,k] contains a bool
indicating whether it is contained in the maximum sized sphere centered at
[i//2, j//2, k//2] that fits in fov_shape_voxels.
This does take into account voxel resolution.
"""
if self._sphere is None:
self._sphere = self._create_sphere(
self.fov_shape_voxels, self.voxel_resolution
)
return self._sphere
@staticmethod
def _data_populator_factory(empty_value: Any, dtype: type):
def data_populator(bounds: Tuple[np.ndarray, np.ndarray]):
return np.full(
np.array(bounds[1]) - np.array(bounds[0]),
fill_value=empty_value,
dtype=dtype,
)
return data_populator
@property
def segmentation_views(self) -> OctreeVolume:
"""
This octree contains counts of how many times a voxel was contained
in a sample point's field of view.
"""
if self._segmentation_views is None:
self._segmentation_views = OctreeVolume(
self.leaf_shape_voxels,
self.seg_voxel_bounds,
np.uint8,
self._data_populator_factory(0, np.uint8),
)
return self._segmentation_views
@property
def segmentation_counts(self) -> OctreeVolume:
"""
This octree contains counts of how many times a voxel was assigned
a value of "in" the desired volume.
"""
if self._segmentation_counts is None:
self._segmentation_counts = OctreeVolume(
self.leaf_shape_voxels,
self.seg_voxel_bounds,
np.uint8,
self._data_populator_factory(0, np.uint8),
)
return self._segmentation_counts
@property
def distances(self) -> OctreeVolume:
"""
This octree contains the distances from each voxel to its closest
sample point.
"""
if self._distances is None:
self._distances = OctreeVolume(
self.leaf_shape_voxels,
self.seg_voxel_bounds,
float,
self._data_populator_factory(float("inf"), float),
)
return self._distances
@staticmethod
def _create_sphere(shape, resolution):
"""
Create a roughly isotropic sphere constrained to the bounds of shape to
avoid letting non-isotropic data bias calculations.
Especially important when detecting missing branches, since we want to be
able to detect branches in the z direction and not let them get overpowered by
the extended view range in the x-y directions.
"""
def dist_to_center(i, j, k, shape, resolution):
i = (
# scale: [0-shape-1] - [-shape-1, shape-1]
(2 * (i - shape[0] // 2))
# scale up by resolution to get isotropic distances
* resolution[0]
# scale shortest axis down to [-1,1]
/ np.min(shape * resolution)
)
j = (2 * (j - shape[1] // 2)) * resolution[1] / np.min(shape * resolution)
k = (2 * (k - shape[2] // 2)) * resolution[2] / np.min(shape * resolution)
return (i ** 2 + j ** 2 + k ** 2) ** (0.5)
sphere = np.ones(shape)
for i in range(shape[0]):
for j in range(shape[1]):
for k in range(shape[2]):
if dist_to_center(i, j, k, shape, resolution) > 1:
sphere[i, j, k] = 0
return sphere
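# Hedged illustration (not part of the original class): for a field of view of
# 21 x 21 x 7 voxels at an anisotropic 4 x 4 x 40 resolution, only voxels whose
# physical distance to the FOV centre fits within the shortest physical axis
# are kept, so the wide x-y coverage cannot overpower the z direction.
#
#   sphere = SegmentationSource._create_sphere(np.array([21, 21, 7]),
#                                              np.array([4, 4, 40]))
#   sphere.shape   # -> (21, 21, 7); entries are 1 inside the sphere, 0 outside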
@staticmethod
def _dist_block(dimensions, resolution):
half_dim = dimensions // 2
x = (
(np.linspace(-half_dim[0], half_dim[0], dimensions[0]) * resolution[0]) ** 2
).reshape(dimensions[0], 1, 1)
y = (
(np.linspace(-half_dim[1], half_dim[1], dimensions[1]) * resolution[1]) ** 2
).reshape(1, dimensions[1], 1)
z = (
(np.linspace(-half_dim[2], half_dim[2], dimensions[2]) * resolution[2]) ** 2
).reshape(1, 1, dimensions[2])
return (x + y + z) ** (0.5) / np.sum((half_dim * resolution) ** 2) ** (0.5)
def create_octrees_from_nodes(self, nodes: Iterable[Node]):
dist_block = self._dist_block(self.fov_shape_voxels, self.voxel_resolution)
if self._config.use_sphere:
dist_block[np.logical_not(self.sphere)] = float("inf")
for node in nodes:
node_bounds = self.transform_bounds(self.get_roi(node.value.center))
if node.value.mask is not None:
self.segmentation_counts[node_bounds] += node.value.mask
self.segmentation_views[node_bounds] += 1
self.distances[node_bounds] = np.minimum(
self.distances[node_bounds], dist_block
)
if (
self._config.interpolate_distance_nodes > 0
and node.children is not None
):
for neighbor in node.children:
for k in range(1, self._config.interpolate_distance_nodes + 1):
linear_step = neighbor.value.center * k / (
self._config.interpolate_distance_nodes + 1
) + node.value.center * (
self._config.interpolate_distance_nodes - k
) / (
self._config.interpolate_distance_nodes + 1
)
mid_bounds = self.transform_bounds(self.get_roi(linear_step))
self.distances[mid_bounds] = np.minimum(
self.distances[mid_bounds], dist_block
)
def save_data(self, folder_path: Path):
"""
Save all the data necessary to rebuild retrieve
any segmentation from this class
"""
datasets = {
"segmentation_views": self.segmentation_views,
"segmentation_counts": self.segmentation_counts,
"distances": self.distances,
}
for name, data in datasets.items():
logger.debug("Saving {} to n5!".format(name))
logger.debug("Num leaves = {}".format(len(list(data.iter_leaves()))))
data.write_to_n5(folder_path + "/segmentations.n5", name)
def save_data_for_CATMAID(self, folder_path: Path):
"""
Save the segmentation confidence score
"""
pyn5.create_dataset(
folder_path + "/segmentations.n5",
"confidence",
[int(x) for x in self.end_voxel],
[int(x) for x in self.leaf_shape_voxels],
"UINT8",
)
dataset = pyn5.open(folder_path + "/segmentations.n5", "confidence")
for leaf in self.distances.iter_leaves():
pyn5.write(
dataset,
leaf.bounds,
(
255
* self._view_weighted_mask(
tuple(map(slice, leaf.bounds[0], leaf.bounds[1]))
)
).astype(np.uint8),
np.uint8,
)
def load_data(self, folder_path: Path):
self._segmentation_views = OctreeVolume.read_from_n5(
folder_path, "segmentation_views", self.shape_voxel
)
self._segmentation_counts = OctreeVolume.read_from_n5(
folder_path, "segmentation_counts", self.shape_voxel
)
self._distances = OctreeVolume.read_from_n5(
folder_path, "distances", self.shape_voxel
)
def transform_bounds(self, bounds: Tuple[np.ndarray, np.ndarray]) -> Tuple[slice]:
"""
Takes bounds in tuple format ((a,b,c), (A,B,C)) and converts them into slices
[a:A, b:B, c:C] in voxel space
"""
# TODO: move assertions into a proper unittest
assert all(
bounds[0] < bounds[1]
), "Resulting shape must be positive on all axes"
assert all(
bounds[0] % self.voxel_resolution == 0
), "Lower bound does not start on a voxel"
assert all(
(bounds[1] - bounds[0]) % self.voxel_resolution == 0
), "Queried shape must be a multiple of the voxel shape"
return tuple(
map(
slice,
(bounds[0] // self.voxel_resolution).astype(int),
(bounds[1] // self.voxel_resolution).astype(int),
)
)
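# Hedged illustration (values are made up): with voxel_resolution == [4, 4, 40],
# physical bounds (np.array([0, 0, 0]), np.array([40, 40, 400])) are converted
# to the voxel-space slices (slice(0, 10), slice(0, 10), slice(0, 10)).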
def get_roi(self, center: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
voxel_shape = self.voxel_resolution
fov_shape = self.fov_shape_phys
center_block = center - center % voxel_shape
block_offset = (fov_shape // voxel_shape) // 2
start = center_block - block_offset * voxel_shape
end = center_block + (block_offset + 1) * voxel_shape
# TODO: put this is a test file
assert all(
end - start == fov_shape
), "ROI does not cover the expected area: {} vs {}".format(
end - start, fov_shape
)
return start, end
def boolean_mask(self, center: np.ndarray) -> np.ndarray:
bounds = self.transform_bounds(self.get_roi(center))
mask = self.segmentation_counts[bounds] > 0
if self._config.use_sphere:
mask[np.logical_not(self.sphere)] = False
return mask
def _boolean_mask(self, bounds: List[slice]) -> np.ndarray:
mask = self.segmentation_counts[bounds] > 0
if np.isnan(mask).any():
raise ValueError("boolean_mask contains NAN!")
if np.isinf(mask).any():
raise ValueError("boolean_mask contains INF!")
return mask
def dist_weighted_boolean_mask(self, center: np.ndarray) -> np.ndarray:
bounds = self.transform_bounds(self.get_roi(center))
mask = self._dist_weighted_boolean_mask(bounds)
if self._config.use_sphere:
mask[np.logical_not(self.sphere)] = 0
return mask
def _dist_weighted_boolean_mask(self, bounds: List[slice]):
mask = self._boolean_mask(bounds) * self._distance_mask(bounds)
if np.isnan(mask).any():
raise ValueError("dist_weighted_boolean_mask contains NAN!")
if np.isinf(mask).any():
raise ValueError("dist_weighted_boolean_mask contains INF!")
return mask
def view_weighted_mask(self, center: np.ndarray) -> np.ndarray:
bounds = self.transform_bounds(self.get_roi(center))
mask = self._view_weighted_mask(bounds)
if self._config.use_sphere:
mask[np.logical_not(self.sphere)] = 0
return mask
def _view_weighted_mask(self, bounds: List[slice]) -> np.ndarray:
mask = self.segmentation_counts[bounds] / (
self.segmentation_views[bounds] + self._config.incr_denom
)
assert mask.max() <= 1, "Cannot have confidence above 100%"
if np.isnan(mask).any():
raise ValueError("view_weighted_mask contains NAN!")
if np.isinf(mask).any():
raise ValueError("view_weighted_mask contains INF!")
return mask
def dist_view_weighted_mask(self, center: np.ndarray) -> np.ndarray:
bounds = self.transform_bounds(self.get_roi(center))
mask = self._dist_view_weighted_mask(bounds)
if self._config.use_sphere:
mask[np.logical_not(self.sphere)] = 0
return mask
def _dist_view_weighted_mask(self, bounds: List[slice]) -> np.ndarray:
mask = self._view_weighted_mask(bounds) * self._distance_mask(bounds)
if np.isnan(mask).any():
raise ValueError("dist_view_weighted_mask contains NAN!")
if np.isinf(mask).any():
raise ValueError("dist_view_weighted_mask contains INF!")
return mask
def _distance_mask(self, bounds: List[slice]) -> np.ndarray:
distances = self.distances[bounds]
logger.debug(
"Percent of distances seen that are infinite is: {}".format(
np.isinf(distances).mean()  # hedged completion; the original call is truncated here
)
)
from soxs.instrument import make_background, AuxiliaryResponseFile, \
instrument_simulator, make_background_file, simulate_spectrum, \
RedistributionMatrixFile
from soxs.background.foreground import hm_astro_bkgnd
from soxs.background.instrument import acisi_particle_bkgnd
from soxs.background.spectra import ConvolvedBackgroundSpectrum
from numpy.random import RandomState
from numpy.testing import assert_allclose
import astropy.io.fits as pyfits
import tempfile
import os
import shutil
import numpy as np
prng = RandomState(24)
def test_uniform_bkgnd_scale():
hdxi_arf = AuxiliaryResponseFile("xrs_hdxi_3x10.arf")
events, event_params = make_background((50, "ks"), "hdxi", [30., 45.],
foreground=True, instr_bkgnd=True,
ptsrc_bkgnd=False, prng=prng)
ncts = np.logical_and(events["energy"] >= 0.7, events["energy"] <= 2.0).sum()
t_exp = event_params["exposure_time"]
fov = (event_params["fov"]*60.0)**2
S = ncts/t_exp/fov
dS = np.sqrt(ncts)/t_exp/fov
foreground = ConvolvedBackgroundSpectrum(hm_astro_bkgnd, hdxi_arf)
f_sum = foreground.get_flux_in_band(0.7, 2.0)[0]
i_sum = acisi_particle_bkgnd.get_flux_in_band(0.7, 2.0)[0]
b_sum = (f_sum+i_sum).to("ph/(arcsec**2*s)").value
assert np.abs(S-b_sum) < 1.645*dS
def test_simulate_bkgnd_spectrum():
tmpdir = tempfile.mkdtemp()
curdir = os.getcwd()
os.chdir(tmpdir)
prng = RandomState(29)
hdxi_arf = AuxiliaryResponseFile("xrs_hdxi_3x10.arf")
hdxi_rmf = RedistributionMatrixFile("xrs_hdxi.rmf")
exp_time = 50000.0
fov = 3600.0
simulate_spectrum(None, "hdxi", exp_time, "test_bkgnd.pha",
instr_bkgnd=True, foreground=True, prng=prng,
overwrite=True, bkgnd_area=(fov, "arcsec**2"))
ch_min = hdxi_rmf.e_to_ch(0.7)-hdxi_rmf.cmin
ch_max = hdxi_rmf.e_to_ch(2.0)-hdxi_rmf.cmin
f = pyfits.open("test_bkgnd.pha")
ncts = f["SPECTRUM"].data["COUNTS"][ch_min:ch_max].sum()
f.close()
S = ncts/exp_time/fov
dS = np.sqrt(ncts)/exp_time/fov
foreground = ConvolvedBackgroundSpectrum(hm_astro_bkgnd, hdxi_arf)
f_sum = foreground.get_flux_in_band(0.7, 2.0)[0]
i_sum = acisi_particle_bkgnd.get_flux_in_band(0.7, 2.0)[0]
b_sum = (f_sum+i_sum).to("ph/(arcsec**2*s)").value
assert np.abs(S-b_sum) < 1.645*dS
os.chdir(curdir)
shutil.rmtree(tmpdir)
def test_add_background():
tmpdir = tempfile.mkdtemp()
curdir = os.getcwd()
os.chdir(tmpdir)
prng1 = RandomState(29)
prng2 = RandomState(29)
ra0 = 30.0
dec0 = 45.0
ra1 = 22.0
dec1 = 22.0
exp_time = 50000.0
ra = np.array([])
dec = np.array([])
e = np.array([])
empty_cat = {"ra": [ra], "dec": [dec], "energy": [e],
"flux": [0.0], "emin": [0.1], "emax": [10.0],
"sources": ["empty"]}
instrument_simulator(empty_cat, "evt1.fits", exp_time, "hdxi",
[ra0, dec0], prng=prng1, overwrite=True)
make_background_file("bkg_evt.fits", exp_time, "hdxi", [ra0, dec0],
prng=prng2, overwrite=True)
instrument_simulator(empty_cat, "evt2.fits", exp_time, "hdxi",
[ra1, dec1], bkgnd_file="bkg_evt.fits",
prng=prng2, overwrite=True)
f1 = pyfits.open("evt1.fits")
f2 = pyfits.open("evt2.fits")
for key in ["X", "Y", "ENERGY", "PHA"]:
assert_allclose(f1["EVENTS"].data[key], f2["EVENTS"].data[key],
)
f1.close()
f2.close()
os.chdir(curdir)
shutil.rmtree(tmpdir)
def test_ptsrc():
from soxs.background.point_sources import generate_fluxes, \
make_ptsrc_background
from soxs.data import cdf_fluxes, cdf_gal, cdf_agn
from soxs.constants import erg_per_keV
tmpdir = tempfile.mkdtemp()
curdir = os.getcwd()
os.chdir(tmpdir)
prng = RandomState(33)
fov = 20.0
exp_time = (500.0, "ks")
area = (30000.0, "cm**2")
f_agn = np.zeros((cdf_fluxes.size-1, 100))
f_gal = np.zeros((cdf_fluxes.size-1, 100))
for k in range(100):
agn_fluxes, gal_fluxes = generate_fluxes(exp_time, area, fov, prng)
f_agn[:,k] = np.histogram(agn_fluxes, bins=cdf_fluxes)[0]
f_gal[:,k] = np.histogram(gal_fluxes, bins=cdf_fluxes)[0]
mu_agn = np.mean(f_agn, axis=1)
import io
import functools
# import soundfile as sf
import numpy as np
import matplotlib
import matplotlib.pylab as plt
from IPython.display import display, Audio
from nara_wpe.utils import stft, istft
from pb_bss.distribution import CACGMMTrainer
from pb_bss.evaluation import InputMetrics, OutputMetrics
from dataclasses import dataclass
from beamforming_wrapper import beamform_mvdr_souden_from_masks
from pb_chime5.utils.numpy_utils import segment_axis_v2
from text_grid import *
def get_time_activity(file_path, wavlen, sr):
time_activity = [False] * wavlen
text = read_textgrid_from_file(file_path)
for interval in text.tiers[1].intervals:
if 'NOISE' not in interval.text:
xmax = int(interval.xmax * sr)
xmin = int(interval.xmin * sr)
if xmax > wavlen:
break
for i in range(xmin, xmax):
time_activity[i] = True
print('num of true {}'.format(time_activity.count(True)))
return time_activity
def get_frequency_activity(time_activity,stft_window_length,stft_shift,stft_fading=True,stft_pad=True,):
time_activity = np.asarray(time_activity)
if stft_fading:
pad_width = np.array([(0, 0)] * time_activity.ndim)
pad_width[-1, :] = stft_window_length - stft_shift # Consider fading
time_activity = np.pad(
time_activity,
pad_width,
mode='constant'
)
return segment_axis_v2(
time_activity,
length=stft_window_length,
shift=stft_shift,
end='pad' if stft_pad else 'cut'
).any(axis=-1)
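# Example (hedged): mapping a sample-level activity mask to STFT frames with a
# 1024-sample window and 256-sample shift. File name and rates are illustrative.
#
#   time_activity = get_time_activity('S02_U06.TextGrid', wavlen=16000 * 60, sr=16000)
#   freq_activity = get_frequency_activity(time_activity, 1024, 256)
#   # freq_activity[t] is True if any sample inside frame t is speech-active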
@dataclass
class Beamformer:
type: str
postfilter: str
def __call__(self, Obs, target_mask, distortion_mask, debug=False):
bf = self.type
if bf == 'mvdrSouden_ban':
from pb_chime5.speech_enhancement.beamforming_wrapper import (
beamform_mvdr_souden_from_masks
)
X_hat = beamform_mvdr_souden_from_masks(
Y=Obs,
X_mask=target_mask,
N_mask=distortion_mask,
ban=True,
)
elif bf == 'ch0':
X_hat = Obs[0]
elif bf == 'sum':
X_hat = np.sum(Obs, axis=0)
else:
raise NotImplementedError(bf)
if self.postfilter is None:
pass
elif self.postfilter == 'mask_mul':
X_hat = X_hat * target_mask
else:
raise NotImplementedError(self.postfilter)
return X_hat
@dataclass
class GSS:
iterations: int = 20
iterations_post: int = 0
verbose: bool = True
# use_pinv: bool = False
# stable: bool = True
def __call__(self, Obs, acitivity_freq=None, debug=False):
initialization = np.asarray(acitivity_freq, dtype=np.float64)
initialization = np.where(initialization == 0, 1e-10, initialization)
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/03_basic_agents.ipynb (unless otherwise specified).
__all__ = ['ActionSelector', 'ArgmaxActionSelector', 'EpsilonGreedyActionSelector', 'ProbabilityActionSelector',
'default_states_preprocessor', 'float32_preprocessor', 'BaseAgent', 'TestAgent', 'DiscreteAgent', 'DQNAgent',
'TargetNet', 'PolicyAgent', 'ActorCriticAgent']
# Cell
import torch, torch.nn.functional as F
from torch import ByteTensor, DoubleTensor, FloatTensor, HalfTensor, LongTensor, ShortTensor, Tensor
from torch import nn, optim, as_tensor
from torch.utils.data import BatchSampler, DataLoader, Dataset, Sampler, TensorDataset
from torch.nn.utils import weight_norm, spectral_norm
from dataclasses import asdict,dataclass
from typing import Callable,Tuple,Union
# from fastai.torch_core import *
# from fastai.basic_data import *
# from fastai.basic_train import *
from fastai.basics import *
import textwrap
import numpy as np
import logging
"Note these are modified versions of 'Shmuma/Ptan'. Github, 2020, https://github.com/Shmuma/ptan/blob/master/ptan/agent.py. Accessed 13 June 2020."
# Cell
class ActionSelector:
"Abstract class which converts scores to the actions."
def __call__(self,scores):raise NotImplementedError
class ArgmaxActionSelector(ActionSelector):
"Selects actions using argmax."
def __call__(self,scores):
assert isinstance(scores,np.ndarray)
return np.argmax(scores, axis=1)
from typing import List, Tuple, Dict
import cv2
import pytesseract
import numpy as np
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
Coordinate = Tuple[int, int]
grid_size: Tuple[int] = tuple()
DEBUG = False
blocks = []
def set_block(block):
global blocks
blocks = block
def split(file_path) -> Dict[Coordinate, np.ndarray]:
im: np.ndarray = cv2.imread(file_path)
cell_size = (round(im.shape[0] / grid_size[0]), round(im.shape[1] / grid_size[1]))
cells = {}
for y in range(grid_size[1]):
for x in range(grid_size[0]):
cell = im[y * cell_size[1]:(y + 1) * cell_size[1], x * cell_size[0]:(x + 1) * cell_size[0]]
# cell = im.crop((x * cell_size[0], y * cell_size[1], (x + 1) * cell_size[0], (y + 1) * cell_size[1]))
cells[(x, y)] = cell
return cells
def set_starters_grid_size(grid_x, grid_y):
global grid_size
grid_size = (grid_x, grid_y)
def show_parts(file_path, grid_x, grid_y) -> Dict[Coordinate, int]:
global grid_size
grid_size = (grid_x, grid_y)
sections = split(file_path)
starters: Dict[Coordinate, int] = {}
for y in range(grid_y):
for x in range(grid_x):
im: np.ndarray = sections[(x, y)]
# Alternatively: can be skipped if you have a Blackwhite image
gray = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
_, img_bin = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
gray: np.ndarray = cv2.bitwise_not(img_bin)
cell_x, cell_y = gray.shape[0:2]
margin = round(cell_x / 7.5)
gray = gray[margin:-margin, margin:-margin]
if DEBUG:
cv2.imshow("a", gray)
kernel = np.ones((2, 1), np.uint8)
img = cv2.erode(gray, kernel, iterations=1)
img = cv2.dilate(img, kernel, iterations=1)
out_below = pytesseract.image_to_string(img, config='--psm 10 --oem 3 -c tessedit_char_whitelist=0123456789 -c page_separator=""').strip()
if len(out_below) > 0:
starters[(x, y)] = int(out_below)
if DEBUG:
print(f"({out_below}), {len(out_below)=}")
cv2.waitKey()
cv2.destroyWindow("a")
# cv2.imwrite("single_cell.png", im)
return starters
def resolve(starters):
original_image: np.ndarray = np.full((grid_size[1] + 2, grid_size[0] + 2, 3), 255, np.uint8)
# ------------------------------------------------------------
# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
#
# Licensed under the BSD 2-Clause License.
# You should have received a copy of the BSD 2-Clause License
# along with the software. If not, See,
#
# <https://opensource.org/licenses/BSD-2-Clause>
#
# ------------------------------------------------------------
"""Test the ops module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import os
import unittest
import numpy as np
from dragon.core.util import nest
from dragon.core.testing.unittest.common_utils import run_tests
from dragon.vm import torch
# Fix the duplicate linked omp runtime
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
# Fix the numpy seed
np.random.seed(1337)
class OpTestCase(unittest.TestCase):
"""The base test case."""
precision = 1e-5
def __init__(self, method_name='runTest'):
super(OpTestCase, self).__init__(method_name)
def assertEqual(
self,
first,
second,
msg=None,
prec=None,
):
if prec is None:
prec = self.precision
inputs = nest.flatten(first)
num_first = len(inputs)
inputs += nest.flatten(second)
num_second = len(inputs) - num_first
for i, input in enumerate(inputs):
if isinstance(input, torch.Tensor):
inputs[i] = input.numpy()
first = inputs[:num_first] if num_first > 1 else inputs[0]
second = inputs[num_first:len(inputs)] if num_second > 1 else inputs[num_first]
if isinstance(first, np.ndarray) and isinstance(second, np.ndarray):
super(OpTestCase, self).assertEqual(first.shape, second.shape)
if first.dtype == bool and second.dtype == bool:
diff = first ^ second
num_unique = len(np.unique(diff))
self.assertLessEqual(num_unique, 1, msg)
else:
diff = np.abs(first - second)
# -*- coding: utf-8 -*-
"""
Created on Dec 12 2020
Code to run MCMC (with fast-burn in) for PICO NR study, WIMP sensitivity
version with "horizontal re-seeding"
parallelization done with python library Multiprocessing
Inputs are (in order):
- directory to find data in
- Period of MCMC run
- epoch_nstep
- bin_number
- stepsize
- chi2 hard cap
- WIMP mass
@author: DDurnford
"""
# libraries
import emcee
import numpy as np
import PICOcalGlobalLikelihood_reparametrization_multi_v2 as pcgl
import os
os.environ["OMP_NUM_THREADS"] = "1"
from multiprocessing import Pool
import warnings
warnings.filterwarnings("ignore")
np.load.__defaults__=(None, True, True, 'ASCII')
import pickle
from scipy.stats import binned_statistic
import sys
import scipy.io as sio
args = sys.argv
np.random.seed(42)
# include all nuisance parameters
which_nuisance = np.array([np.ones(pcgl.n_nuisance,dtype = np.bool)])
dim_nuisance = np.sum(which_nuisance)
# number of thresholds
num_threshold = pcgl.threshold_fenceposts.size
# number of species
num_elements = 2
# number of parameters in the model
ndim = 10*num_threshold + dim_nuisance
# number of dimensions to consider for WIMP recasting
mDim = 8
#------ Initial Guess
# BF from Period 34 (ddurnford fit of PICO data)
guess_theta = np.array([ 1.65750550e+00, 1.19668186e+00, 1.66530667e+00, 1.27574295e+00, -2.82076273e+00, -2.71818698e+00, -3.01324190e+00, -1.88755528e+00,1.66976041e+00, -5.64587118e+00, 1.75194971e+00, -5.41992168e+00,6.43072211e-01, -5.24568677e-01, 3.59527604e-01, -6.14857566e-01,-4.19287206e-01, 7.85916476e-01, 4.71423407e-02, 1.75578191e+00,5.53690885e-03, -3.31378126e-01, 3.86920360e-01, 1.09323458e+00,-7.06982858e-02, -1.43923824e+00, 8.82628498e-01, 2.78938373e-01,-7.56704066e-01, 9.73561639e-01, 6.23926470e-01, -2.66908442e-01,-1.10396359e+00, -5.22685251e-02])
#-------- Volume calculation
# reasonable bounds for volume calcuation
binsa = np.array([ 1.01808316, 0.89609191, 1.29266798, 1.16315096, -3.88617265,
-3.64865946, -5.60787692, -3.18800453, 0.36706077, -7.83267239,
0.81973171, -8.1652399 , -0.59245043, -2.89515001, -0.07374429,
-2.70995565, -1.58162291, -0.91317244, -2.98916088, -1.78958249,
-0.75211146, -1.44435034, -0.60465208, 0.6712873 , -1.08475804,
-2.42844962, -0.26551765, -0.74018606, -1.62686749, 0.2526427 ,
-0.36140405, -1.30059274, -2.05057406, -0.21927138])
binsb = np.array([ 2.56330499, 1.23492372, 2.56346639, 1.46296621, -0.78377603,
0.16873003, -2.05195839, -0.66289017, 2.34041311, -2.87832399,
3.90205553, -4.91489277, 1.72977452, 0.20070191, 2.24981077,
0.75238084, 2.00114598, 2.08220374, 0.81442556, 2.24036402,
1.11866961, 0.21818037, 1.73594775, 2.0517152 , 0.50993029,
-0.87082394, 0.92066029, 1.26558695, -0.06077413, 1.63325533,
1.52532272, 0.80405223, 0.06672319, 0.05886753])
def calcVol(S,L,additive = 100):
''' This calculates the "1-sigma volume" contained by explored mcmc samples
Inputs: S, samples of mcmc
L, log_prob values
additive = positive additive constant to keep volumes > 0
Outputs: "volume"
'''
#number of dimensions
ndim = S.shape[1]
#initialize
v = 0.
nb = 60
#main loop
for i in range(ndim):
maxi,edd,indi = binned_statistic(S[:,i],L,'max',nb,(binsa[i],binsb[i]))
bc = edd[0:-1] + 0.5*(edd[1]-edd[0])
if all(np.isnan(maxi)) == True:
continue
maxi[np.isnan(maxi) == True] = np.min(maxi[np.isnan(maxi)==False])
v += np.trapz(maxi - additive,bc)
return v
def calcVol2(S,L):
''' This calculates the "1-sigma volume" contained by explored mcmc samples
New, simpler version with no additive constant required, although it does
allow for the volume to decrease from one epoch to the next
Inputs: S, samples of mcmc
L, log_prob values
Outputs: "volume"
'''
#select 1 sigma samples
Ss = S[L > np.max(L)-1]
#number of dimensions
nD = np.shape(Ss)[1]
#initialize volume
vol = 0.
#for each dimension, add up range subtended by 1 sigma samples
for i in range(nD):
vol += (np.max(Ss[:,i]) - np.min(Ss[:,i]))
return vol
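# Example (hedged): both volume measures operate on the flattened chain; the
# sampler below is illustrative (any emcee EnsembleSampler would do).
#
#   S = sampler.get_chain(flat=True)       # shape (nsamples, ndim)
#   L = sampler.get_log_prob(flat=True)    # shape (nsamples,)
#   v1 = calcVol(S, L)     # per-dimension profile integral with additive offset
#   v2 = calcVol2(S, L)    # summed 1-sigma ranges of the best samples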
#-------- Some constants for WIMP sensitivity
SI_denominator = 3*12.011 + 8*18.998403163
SI_C_numerator = 3*12.011
SI_F_numerator = 8*18.998403163
mass_array = np.array([1.5849e+00, 1.7378e+00, 1.9055e+00, 2.0893e+00, 2.2909e+00, 2.5119e+00, 2.7542e+00,
3.0200e+00, 3.3113e+00, 3.6308e+00, 3.9811e+00, 4.3652e+00, 4.7863e+00, 5.2481e+00, 5.7544e+00,
6.3096e+00, 6.9183e+00, 7.5858e+00, 8.3176e+00, 9.1201e+00, 1.0000e+01, 1.0965e+01, 1.2023e+01,
1.3183e+01, 1.4454e+01, 1.5849e+01, 1.7378e+01, 1.9055e+01, 2.0893e+01, 2.2909e+01, 2.5119e+01,
3.1623e+01, 3.9811e+01, 5.0119e+01, 6.3096e+01, 7.9433e+01, 1.0000e+02, 1.2589e+02, 1.5849e+02,
1.9953e+02, 2.5119e+02, 3.1623e+02, 1.0000e+03, 3.1623e+03, 1.0000e+04, 3.1623e+04, 1.0000e+05])
bin_length = 13001
bin_width = 0.01
which_mass = np.zeros(mass_array.shape[0], dtype=np.bool)
#-------- Load WIMP spectra and define WIMP masses
# (taken from chimera:/home/mjn693/Documents/LL/Python_objects/)
WIMPspectra_production = sio.loadmat('WIMPspectra_production.mat')
#-------- Production run Parameters
# What data to look at?
topdir = args[1]
# Period for MCMC run
Period = args[2]
print('------ Period ' + Period + ' ------')
#Prep PCGL code
pcgl.prep([topdir])
# storage directory for MCMC
storeDir = 'Epoch_storage'
# MCMC parameters
epoch_nstep = int(args[3]) # how many steps per epoch (5 for rough, 10 for fine)
bin_number = int(args[4]) # 100 for rough, 500 for fine
bin_number = 40
ntemps = 1 # historical, kept for formatting reasons
num_walkers = 100 # Number of walkers for initial start
stepsize = float(args[5]) # 2 for faster exploration, 1.2 for fine tuning
nw_i = num_walkers
nd_i = ndim
#reset to more reasonable value
pcgl.chisq_hard_cap = float(args[6])
# WIMP mass and interaction type
wimp_mass = float(args[7])
int_type = args[8]
#determine mass index
mass_index = np.argmin(np.abs(wimp_mass - mass_array))
if np.abs(mass_array[mass_index] - wimp_mass) > 0.25:
print('Warning! No close WIMP mass in table!')
exit()
# Number of CPUs to use (#8 by default)
nCPU = 10
# load from existing epoch?
state_file = storeDir + '/Period'+str(Period)+'_state'
if os.path.exists(state_file) == True:
load_epoch = True
else:
load_epoch = False
# initialize convergence criteria
max0 = -1e100
maxL = -2e100
strike = 0
def treatTheta(theta):
''' This converts direct sample input into efficiency curve points
Inputs: theta
Outputs: Epts
'''
# re-shapes
dEpts_reparam = np.reshape(theta[:20], (5,2,2))
# new array
dEpts = np.zeros([5,2,2])
# just exp's when reparam_fenceposts == 0
for i_th in range(2):
dEpts[0,i_th,:] = np.exp( dEpts_reparam[0,i_th,:])
dEpts[1:,i_th,:] = np.exp(dEpts_reparam[1:,i_th,:])
# sums up contributions
Epts = np.cumsum(dEpts, axis=0)
return Epts
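# Example (hedged): the cumulative sum of exponentials guarantees that the
# five efficiency fenceposts are positive and monotonically increasing for
# each threshold/species combination.
#
#   Epts = treatTheta(guess_theta)   # shape (5, 2, 2)
#   # np.all(np.diff(Epts, axis=0) >= 0) should hold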
def wimp_treat(theta,mass_index,int_type):
''' Calculates WIMP sensitivity for given theta, mass interaction type
Inputs: theta, wimp mass index, interaction type (sting)
Outputs: 8 WIMP sensitivity combinations (see Jin's thesis)
'''
# Treat theta
thetaT = treatTheta(theta).ravel()
# extract C and F points at both thresholds
C_245 = thetaT[::4]
F_245 = thetaT[1::4]
C_329 = thetaT[2::4]*(3.29/3.)
F_329 = thetaT[3::4]*(3.29/3.)
# create interpolation of efficiency curves
C_interp_245 = np.interp(1.0 + np.arange(0,bin_length*bin_width,bin_width) ,C_245,[0, .2, .5, .8, 1.0])
F_interp_245 = np.interp(1.0 + np.arange(0,bin_length*bin_width,bin_width) ,F_245,[0, .2, .5, .8, 1.0])
C_interp_329 = np.interp(1.0 + np.arange(0,bin_length*bin_width,bin_width) ,C_329,[0, .2, .5, .8, 1.0])
F_interp_329 = np.interp(1.0 + np.arange(0,bin_length*bin_width,bin_width) ,F_329,[0, .2, .5, .8, 1.0])
# what interaction type? For SD...
if int_type == 'SD':
# get rate for fluorine only (for SD)
drde_f = WIMPspectra_production['SD_F_table'][mass_index,:]
# calculate WIMP sensitivity for both thresholds
WS_245 = np.sum(F_interp_245*drde_f)*bin_width
WS_329 = np.sum(F_interp_329*drde_f)*bin_width
# For SI...
elif int_type == 'SI':
# get rate for fluorine and carbon
drde_f = WIMPspectra_production['SI_F_table'][mass_index,:]
drde_c = WIMPspectra_production['SI_C_table'][mass_index,:]
# calculate WIMP sensitivity for both thresholds
WS_245 = ((SI_F_numerator/SI_denominator * np.sum(F_interp_245*drde_f)) + (SI_C_numerator/SI_denominator * np.sum(C_interp_245* drde_c))) * bin_width
WS_329 = ((SI_F_numerator/SI_denominator * np.sum(F_interp_329* drde_f)) + (SI_C_numerator/SI_denominator * np.sum(C_interp_329* drde_c))) * bin_width
# invalid interaction type
else:
print('Invalid interaction type!')
exit()
# 8 combinations of variables
linear_combs = np.array([WS_329,-WS_329,-WS_245,WS_245,WS_245+WS_329,WS_245-WS_329,-WS_245-WS_329,-WS_245+WS_329])
# Done!
return linear_combs
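# Example (hedged): evaluating the eight sensitivity combinations at the
# starting point defined above, for the spin-dependent channel.
#
#   combs = wimp_treat(guess_theta, mass_index, 'SD')
#   # combs = [WS_329, -WS_329, -WS_245, WS_245, WS_245 + WS_329,
#   #          WS_245 - WS_329, -WS_245 - WS_329, -WS_245 + WS_329]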
# -----------------------------------------------------------------------------
# Set up initial starting point
epoch_starting_points = np.zeros((num_walkers,ndim))
if load_epoch == True:
# load files
samples_file = storeDir + '/Period'+str(Period)+'_samples.txt'
log_prob_file = storeDir + '/Period'+str(Period)+'_logProb.txt'
wimp_file = storeDir + '/Period'+str(Period)+'_wimp.txt'
lt = storeDir + '/Period'+str(Period)+'_state'
samples = np.loadtxt(samples_file)
log_prob = np.loadtxt(log_prob_file)
import numpy as np
from numpy.testing import run_module_suite, assert_equal, assert_array_equal, \
assert_array_almost_equal, assert_approx_equal, assert_raises
from scipy.stats.contingency import margins, expected_freq, chi2_contingency
def test_margins():
a = np.array([1])
m = margins(a)
assert_equal(len(m), 1)
m0 = m[0]
assert_array_equal(m0, np.array([1]))
a = np.array([[1]])
m0, m1 = margins(a)
expected0 = np.array([[1]])
expected1 = np.array([[1]])
assert_array_equal(m0, expected0)
assert_array_equal(m1, expected1)
a = np.arange(12).reshape(2, 6)
m0, m1 = margins(a)
expected0 = np.array([[15], [51]])
expected1 = np.array([[6, 8, 10, 12, 14, 16]])
assert_array_equal(m0, expected0)
assert_array_equal(m1, expected1)
a = np.arange(24).reshape(2, 3, 4)
m0, m1, m2 = margins(a)
expected0 = np.array([[[66]], [[210]]])
expected1 = np.array([[[60], [92], [124]]])
expected2 = np.array([[[60, 66, 72, 78]]])
assert_array_equal(m0, expected0)
assert_array_equal(m1, expected1)
assert_array_equal(m2, expected2)
def test_expected_freq():
assert_array_equal(expected_freq([1]), np.array([1.0]))
# Copyright (c) 2014-2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import numpy as np
class BindingPrediction(object):
def __init__(
self,
peptide,
allele,
affinity,
percentile_rank,
source_sequence_name=None,
offset=0,
log_affinity=None,
prediction_method_name=""):
"""
Parameters
----------
peptide : str
Short amino acid sequence
allele : str
HLA allele, e.g. HLA-A*02:01
affinity : float
Predicted binding affinity
percentile_rank : float
Percentile rank of the binding affinity for that allele
source_sequence_name : str
Name of sequence from which peptide was extracted
offset : int
Base0 starting position in source sequence that all epitopes were
extracted from
log_affinity : float, optional
NetMHC sometimes gives invalid IC50 values but we can still
reconstruct the value from its (1.0 - log_50000(IC50)) score.
prediction_method_name : str, optional
Name of predictor used to generate this prediction.
"""
# if we have a bad IC50 score we might still get a salvageable
# log of the score. Strangely, this is necessary sometimes!
if invalid_affinity(affinity) and np.isfinite(log_affinity):
# pylint: disable=invalid-unary-operand-type
affinity = 50000 ** (-log_affinity + 1)
# if IC50 is still NaN or otherwise invalid, abort
if invalid_affinity(affinity):
raise ValueError(
"Invalid IC50 value %0.4f for %s w/ allele %s" % (
affinity,
peptide,
allele))
if invalid_percentile_rank(percentile_rank):
raise ValueError(
"Invalid percentile rank %s for %s w/ allele %s" % (
percentile_rank, peptide, allele))
self.source_sequence_name = source_sequence_name
self.offset = offset
self.allele = allele
self.peptide = peptide
self.affinity = affinity
self.percentile_rank = percentile_rank
self.prediction_method_name = prediction_method_name
def __str__(self):
format_string = (
"BindingPrediction("
"peptide='%s', "
"allele='%s', "
"affinity=%0.4f, "
"percentile_rank=%s, "
"source_sequence_name=%s, "
"offset=%d, "
"prediction_method_name='%s')")
return format_string % (
self.peptide,
self.allele,
self.affinity,
('%0.4f' % self.percentile_rank
if self.percentile_rank
else None),
('%s' % self.source_sequence_name
if self.source_sequence_name
else None),
self.offset,
self.prediction_method_name)
def clone_with_updates(self, **kwargs):
"""Returns new BindingPrediction with updated fields"""
fields_dict = self.to_dict()
fields_dict.update(kwargs)
return BindingPrediction(**fields_dict)
def __repr__(self):
return str(self)
@property
def length(self):
"""Length of peptide, preserved for backwards compatibility"""
return len(self.peptide)
@property
def value(self):
"""Alias for affinity preserved for backwards compatibility"""
return self.affinity
fields = (
"source_sequence_name",
"offset",
"peptide",
"allele",
"affinity",
"percentile_rank",
"prediction_method_name"
)
def to_tuple(self):
return (
self.source_sequence_name,
self.offset,
self.peptide,
self.allele,
self.affinity,
self.percentile_rank,
self.prediction_method_name)
def to_dict(self):
return {k: v for (k, v) in zip(self.fields, self.to_tuple())}
def __eq__(self, other):
return (
other.__class__ is BindingPrediction and
self.to_tuple() == other.to_tuple())
def __hash__(self):
return hash(self.to_tuple())
def __lt__(self, other):
return self.value < other.value
def invalid_affinity(x):
return x < 0 or np.isnan(x) or np.isinf(x)
"""
factor.py
Defines variables, variable sets, and dense factors over discrete variables (tables) for graphical models
Version 0.1.0 (2021-03-25)
(c) 2015-2021 <NAME> under the FreeBSD license; see license.txt for details.
"""
import numpy as np
#import autograd.numpy as np
from sortedcontainers import SortedSet as sset
## Under testing: cython-compiled variable sets for faster operations
try:
from pyGMs.varset_c import Var,VarSet
except ImportError:
#print "Compiled version not loaded; importing python version"
from pyGMs.varset_py import Var,VarSet # sortedcontainers version
#from .varset_py2 import Var,VarSet # numpy array version
inf = float('inf')
orderMethod = 'F' # TODO: currently stores in fortran order (as Matlab); should be trivially changable
#orderMethod = 'C' # Can we make this "seamless" to the user, and/or force them to do something consistent?
# Notes: column-major (order=F) puts last index sequentially ("big endian"): t[0 0 0], t[0 0 1], t[0 1 0] ...
# row major (order=C) puts 1st index sequentially ("little endian"): t[0 0 0], t[1 0 0], t[0 1 0], ...
class Factor(object):
"""A basic factor<float> class
Factors are the basic building block of our graphical model representations. In general, a factor
consists of a set of variables (its "scope"), and a table of values indicating f(x) for each
joint configuration x (a tuple of values) of its variables.
Variables are stored in sorted order; most of the time, factors are constructed by reading from files,
but if built by hand it is safest to use indexing to set the values, e.g.,
>>> f = Factor( [X,Y,Z], 0.0 ) # builds a factor over X,Y,Z filled with zeros
>>> f[0,1,0] = 1.5 # set f(X=0,Y=1,Z=0) to 1.5
Useful attributes are f.vars (the scope) and f.table (the table, a numpy array).
Factors are imbued with many basic operations for manipulation:
Operators: *, +, /, -, **, exp, log, abs, etc.
In-place versions: *=, +=, /=, -=, **=, expIP, logIP, etc.
Elimination: max, min, sum, lse (log-sum-exp), etc.
Conditioning: return a factor defined by a sub-table, assigning some variables to values
Other: argmax, argmin, sample, etc., return configurations of X (tuples)
"""
#v = VarSet([]) # internal storage for variable set (VarSet)
#t = np.ndarray([]) # internal storage for table (numpy array)
def __init__(self,vars=VarSet(),vals=1.0):
"""Constructor for Factor class
>>> f = Factor( [X,Y,Z],[vals] ) # creates factor over [X,Y,Z] with table [vals]
[vals] should be a correctly sized numpy array, or something that can be cast to the same.
"""
# TODO: add user-specified order method for values (order=)
# TODO: accept out-of-order vars list (=> permute vals as req'd)
try:
self.v = VarSet(vars) # try building varset with args
except TypeError: # if not iterable (e.g. single variable)
self.v = VarSet() # try just adding it
self.v.add(vars)
#assert( self.v.nrStates() > 0)
#if self.v.nrStatesDouble() > 1e8: raise ValueError("Too big!");
try:
self.t = np.empty(self.v.dims(), float, orderMethod);
self.t[:] = vals # try filling factor with "vals"
except ValueError: # if it's an incompatible shape,
self.t = np.reshape(np.array(vals, float), self.v.dims(), orderMethod) # try again using reshape
def __build(self,vs,ndarray):
"""Internal build function from numpy ndarray"""
self.v = vs
self.t = ndarray
return self
#TODO: def assign(self, F) : set self equal to rhs F, e.g., *this = F
def copy(self):
"""Copy constructor; make a copy of a factor"""
return Factor().__build(self.v.copy(),self.t.copy('K')) # order=orderMethod?
def changeVars(self, vars, copy=True):
"""Copy a factor but change its arguments (scope).
>>> f = Factor([X0,X1], table)
>>> g = changeVars( f, [X7,X5]) # now, g(X5=b,X7=a) = f(X0=a,X1=b)
"""
v = VarSet(vars)
        newOrder = [vars.index(x) for x in v]  # list of axis positions for transpose (Py3: map() is lazy)
if copy: ret = Factor(v, self.t.transpose(newOrder))
else: ret = Factor().__build(v, self.t.transpose(newOrder)) # try not to copy if possible
return ret
def __repr__(self):
"""Detailed representation: scope (varset) + table memory location"""
return 'Factor({:s},[0x{:x}])'.format(str(self.v),self.t.ctypes.data)
def __str__(self):
"""Basic string representation: scope (varset) only"""
return 'Factor({:s})'.format(str(self.v))
def latex(self, valueformat="0.4f", factorname="$f(x)$", varnames=None):
"""Return string containing latex code for table values.
Arguments:
valueformat : string formatter for values in value column; default "0.4f"
factorname : string for header of value column
varnames : dict mapping variable ID to string for that column (defaults to $x_i$ if None)
"""
tex = "\\begin{tabular}[t]{" + "".join(["c" for v in self.v]) + "|c}\n"
#tex += " & ".join(["$x"+str(int(v))+"$" for v in self.v]) + " & $f_{"+"".join([str(int(v)) for v in self.v])+"}$ \\\\ \\hline \n"
if varnames is None: varnames = {v:"$x_{"+str(int(v))+"}$" for v in self.v}
tex += " & ".join([varnames[v] for v in self.v]) + " & "+factorname+" \\\\ \\hline \n"
for s in range(self.numel()):
tex += " & ".join([str(si) for si in self.v.ind2sub(s)]) + " & " + ("{:"+valueformat+"}").format(self[s]) + "\\\\ \n"
tex += "\\end{tabular} \n"
return tex
@property
def vars(self):
"""Variables (scope) of the factor; read-only"""
return self.v
@vars.setter
def vars(self,value):
raise AttributeError("Read-only attribute")
@property
def table(self):
"""Table (values, as numpy array) of the factor"""
return self.t
@table.setter
def table(self,values):
try:
self.t[:] = values # try filling factor with "values"
except ValueError: # if it's an incompatible shape,
self.t = np.array(values,dtype=float).reshape(self.v.dims(),order=orderMethod) # try again using reshape
@property
def nvar(self):
"""Number of arguments (variables, scope size) for the factor"""
return len(self.v)
#@property
def dims(self):
"""Dimensions (table shape) of the tabular factor"""
return self.t.shape
#@property # TODO: make property?
def numel(self):
"""Number of elements (size) of the tabular factor"""
return self.t.size
################## METHODS ##########################################
def __getitem__(self,loc):
"""Accessor: F[x1,x2] = F[sub2ind(x1,x2)] = F(X1=x1,X2=x2)"""
if isinstance(loc, dict): return self.valueMap(loc)
if self.t.ndim == 1 or isinstance(loc, (tuple, list)):
return self.t[loc]
else:
try:
return self.t[self.v.ind2sub(loc)]
except ValueError:
raise IndexError("Index {} invalid for table with size {}".format(loc,self.t.shape))
def __setitem__(self,loc,val):
"""Assign values of the factor: F[i,j,k] = F[idx] = val if idx=sub2ind(i,j,k)"""
if isinstance(loc, dict): return self.setValueMap(loc,val)
if self.t.ndim == 1 or isinstance(loc, (tuple, list)):
self.t[loc] = val
else:
try:
self.t[self.v.ind2sub(loc)] = val
#self.t.flat[loc] = val # uses c-contiguous order...
except ValueError:
raise IndexError("Index {} invalid for table with size {}".format(loc,self.t.shape))
#value = __getitem__ # def f.value(loc): Alternate name for __getitem__
def value(self,x):
"""Type-safe version of __getitem__: returns scalar float entry of table at tuple x, or exception"""
if self.nvar == 0: return self.t[0]
return self.t.item(x)
def setValue(self,x,val):
"""Type-safe version of __setitem__: sets a scalar float entry of table at tuple x, or exception"""
self.t.itemset(x,val)
def valueMap(self,x):
"""Accessor: F[x[i],x[j]] where i,j = F.vars, i.e, x is a map from variables to their state values"""
if self.nvar == 0: return self.t[0] # if a scalar f'n, nothing to index
return self.t[tuple(x[v] for v in self.v)] # otherwise, find entry of table
def setValueMap(self,x,val):
"""Set F[x[i],x[j]] = val, where i,j = F.vars, i.e, x is a map from variables to their state values"""
self.t[tuple(x[v] for v in self.v) if len(self.v) else 0] = val # lookup location to set, or 0 if scalar f'n
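    # Example (sketch): dict-style indexing maps variables to states; with
    # hypothetical Var objects X, Y:
    #   f = Factor([X, Y], 0.0)
    #   f[{X: 0, Y: 1}] = 2.5           # same as f.setValueMap({X: 0, Y: 1}, 2.5)
    #   f.valueMap({X: 0, Y: 1})        # -> 2.5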
def __float__(self):
"""Convert factor F to scalar float if possible; otherwise raises ValueError"""
if (self.nvar == 0): return self.t[0]
else: raise ValueError("Factor is not a scalar; scope {}".format(self.v))
# TODO missing comparator functions?
def isnan(self):
"""Check for NaN (not-a-number) entries in the factor's values; true if any NaN present"""
return self.isAny( (lambda x: np.isnan(x)) )
def isfinite(self):
"""Check for infinite (-inf, inf) or NaN values in the factor; false if any present"""
return not self.isAny( (lambda x: not np.isfinite(x)) )
def isAny(self,test):
"""Generic check for any entries satisfying lambda-expression "test" in the factor"""
for x in np.nditer(self.t, op_flags=['readonly']):
if test(x):
return True
return False
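    # Example (sketch): f.isAny(lambda x: x < 0) returns True iff some table
    # entry is negative; isnan() and isfinite() above are built on this check.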
#### UNARY OPERATIONS ####
def __abs__(self):
"""Return the absolute value of F: G = F.abs() => G(x) = | F(x) | for all x"""
return Factor().__build( self.v.copy() , np.fabs(self.t) )
abs = __abs__
def __neg__(self):
"""Return the negative of F: G = -F => G(x) = -F(x) for all x"""
return Factor().__build( self.v.copy() , np.negative(self.t) )
def exp(self):
"""Return the exponential of F: G = F.exp() => G(x) = exp(F(x)) for all x"""
return Factor().__build( self.v.copy() , np.exp(self.t) )
def __pow__(self,power):
"""Return F raised to a power: G = F.power(p) => G(x) = ( F(x) )^p for all x"""
return Factor().__build( self.v.copy() , np.power(self.t,power) )
power = __pow__
def log(self): # just use base?
"""Return the natural log of F: G = F.log() => G(x) = log( F(x) ) for all x"""
with np.errstate(divide='ignore'):
return Factor().__build( self.v.copy() , np.log(self.t) )
def log2(self):
"""Return the log base 2 of F: G = F.log2() => G(x) = log2( F(x) ) for all x"""
with np.errstate(divide='ignore'):
return Factor().__build( self.v.copy() , np.log2(self.t) )
def log10(self):
"""Return the log base 10 of F: G = F.log10() => G(x) = log10( F(x) ) for all x"""
with np.errstate(divide='ignore'):
return Factor().__build( self.v.copy() , np.log10(self.t) )
#### IN-PLACE UNARY OPERATIONS ####
# always return "self" for chaining: f.negIP().expIP() = exp(-f(x)) in-place
def absIP(self):
"""Take the absolute value of F: F.absIP() => F(x) <- |F(x)| (in-place)"""
np.fabs(self.t, out=self.t)
return self
def expIP(self):
"""Take the exponential of F: F.expIP() => F(x) <- exp(F(x)) (in-place)"""
np.exp(self.t, out=self.t)
return self
def powerIP(self,power):
"""Raise F to a power: F.powerIP(p) => F(x) <- ( F(x) )^p (in-place)"""
np.power(self.t, power, out=self.t)
return self
__ipow__ = powerIP
def logIP(self): # just use base?
"""Take the natural log of F: F.logIP() => F(x) <- log( F(x) ) (in-place)"""
with np.errstate(divide='ignore'):
np.log(self.t, out=self.t)
return self
def log2IP(self):
"""Take the log base 2 of F: F.log2IP() => F(x) <- log2( F(x) ) (in-place)"""
with np.errstate(divide='ignore'):
            np.log2(self.t, out=self.t)
        return self
"""
Galaxy wrapper for using Scikit-learn API with Keras models
Author: <NAME>
Email: <EMAIL>
2019 - 2020
"""
import collections
import collections.abc
import copy
import h5py
import json
import numpy as np
import random
import tensorflow as tf
import warnings
import six
from abc import ABCMeta
from pathlib import Path
from tensorflow import keras
import sys
from tensorflow.keras.callbacks import (
Callback, CSVLogger, EarlyStopping, LearningRateScheduler,
TensorBoard, RemoteMonitor, ModelCheckpoint, TerminateOnNaN
)
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.optimizers import (
Adadelta, Adagrad, Adam, Adamax, Nadam, SGD, RMSprop, Ftrl)
from tensorflow.keras.utils import (
Sequence, OrderedEnqueuer, GeneratorEnqueuer, to_categorical)
from tensorflow.python.keras.saving import hdf5_format
from tensorflow.python.keras.utils.multi_gpu_utils import multi_gpu_model
from tensorflow.python.keras.utils.data_utils import iter_sequence_infinite
from tensorflow.python.keras.utils.generic_utils import (has_arg,
to_list)
from sklearn.base import (BaseEstimator, ClassifierMixin,
RegressorMixin, clone, is_classifier)
from sklearn.metrics import SCORERS
from sklearn.model_selection import ShuffleSplit, StratifiedShuffleSplit
from sklearn.utils import check_array, check_X_y
from sklearn.utils.multiclass import (check_classification_targets,
type_of_target)
from sklearn.utils.validation import check_is_fitted, check_random_state
from .externals.selene_sdk.utils import compute_score
from . import utils
__all__ = ('KerasEarlyStopping', 'KerasTensorBoard', 'KerasCSVLogger',
'KerasLearningRateScheduler', 'KerasRemoteMonitor',
'KerasModelCheckpoint', 'KerasTerminateOnNaN', 'MetricCallback',
'check_params', 'SearchParam', 'KerasLayers', 'BaseKerasModel',
'KerasGClassifier', 'KerasGRegressor', 'KerasGBatchClassifier')
class BaseOptimizer(BaseEstimator):
"""
Base wrapper for Keras Optimizers
"""
def get_params(self, deep=False):
out = {}
for k, v in self._hyper.items():
if isinstance(v, tf.Variable):
out[k] = v.numpy().item()
else:
out[k] = v
return out
class KerasSGD(SGD, BaseOptimizer):
pass
class KerasRMSprop(RMSprop, BaseOptimizer):
pass
class KerasAdagrad(Adagrad, BaseOptimizer):
pass
class KerasAdadelta(Adadelta, BaseOptimizer):
pass
class KerasAdam(Adam, BaseOptimizer):
pass
class KerasAdamax(Adamax, BaseOptimizer):
pass
class KerasNadam(Nadam, BaseOptimizer):
pass
class KerasEarlyStopping(EarlyStopping, BaseEstimator):
pass
class KerasLearningRateScheduler(LearningRateScheduler, BaseEstimator):
pass
class KerasTensorBoard(TensorBoard, BaseEstimator):
pass
class KerasRemoteMonitor(RemoteMonitor, BaseEstimator):
pass
class KerasModelCheckpoint(ModelCheckpoint, BaseEstimator):
pass
class KerasTerminateOnNaN(TerminateOnNaN, BaseEstimator):
pass
class KerasCSVLogger(CSVLogger, BaseEstimator):
pass
class MetricCallback(Callback, BaseEstimator):
""" A callback to return validation metric
Parameters
----------
scorer : str
Key of sklearn.metrics.SCORERS
"""
def __init__(self, scorer='roc_auc'):
self.scorer = scorer
self.validation_data = None
self.model = None
def on_train_begin(self, logs={}):
return
def on_train_end(self, logs={}):
return
def on_epoch_begin(self, epoch, logs={}):
return
def on_epoch_end(self, epoch, logs={}):
scorer = SCORERS[self.scorer]
print(self.validation_data)
x_val, y_val, _, _ = self.validation_data
pred_probas = self.model.predict(x_val)
pred_labels = (pred_probas > 0.5).astype('int32')
preds = pred_labels if scorer.__class__.__name__ == \
'_PredictScorer' else pred_probas
        # binary
if y_val.ndim == 1 or y_val.shape[-1] == 1:
preds = preds.ravel()
score = scorer._score_func(y_val, preds)
# multi-label
else:
score, _ = compute_score(preds, y_val, scorer._score_func)
print('\r%s_val: %s' % (self.scorer, str(round(score, 4))),
end=100*' '+'\n')
return
def on_batch_begin(self, batch, logs={}):
return
def on_batch_end(self, batch, logs={}):
return
def _get_params_from_dict(dic, name):
"""
    Generate search parameters from `model.get_config()`
Parameter:
----------
dic: dict
name: str, the name of dict.
"""
out = {}
for key, value in six.iteritems(dic):
if isinstance(value, dict):
out['%s__%s' % (name, key)] = value
out.update(_get_params_from_dict(
value, '%s__%s' % (name, key)))
else:
out['%s__%s' % (name, key)] = value
return out
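# Example (sketch): nested layer configs are flattened into double-underscore
# keys, e.g. for a hypothetical Dense layer config
#   _get_params_from_dict({'units': 32, 'activation': 'relu'}, 'layers_0_Dense')
#   -> {'layers_0_Dense__units': 32, 'layers_0_Dense__activation': 'relu'}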
def _param_to_dict(s, v):
"""
Turn search param to deep nested dictionary
"""
rval = {}
key, dlim, sub_key = s.partition('__')
if not dlim:
rval[key] = v
else:
rval[key] = _param_to_dict(sub_key, v)
return rval
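# Example (sketch): the inverse direction rebuilds the nesting, e.g.
#   _param_to_dict('config__layers__units', 32) -> {'config': {'layers': {'units': 32}}}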
def _update_dict(d, u):
"""
Update value for nested dictionary, but not adding new keys
Parameters:
d: dict, the source dictionary
u: dict, contains value to update
"""
for k, v in six.iteritems(u):
        if isinstance(v, collections.abc.Mapping):
d[k] = _update_dict(d[k], v)
elif k not in d:
raise KeyError
else:
d[k] = v
return d
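# Example (sketch): existing values are overwritten but new keys are rejected:
#   _update_dict({'a': {'b': 1}, 'c': 2}, {'a': {'b': 3}})  -> {'a': {'b': 3}, 'c': 2}
#   _update_dict({'a': 1}, {'z': 9})                        -> raises KeyError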
def check_params(params, fn):
"""
Check whether params are valid for function(s)
Parameter:
----------
params : dict
fn : function or functions iterables
"""
if not isinstance(fn, (list, tuple)):
fn = [fn]
for p in list(six.iterkeys(params)):
for f in fn:
if has_arg(f, p):
break
else:
raise ValueError(
"{} is not a legal parameter".format(p))
class SearchParam(utils.SearchParam):
"""
Sortable Wrapper class for search parameters
"""
def to_dict(self):
return _param_to_dict(self.s_param, self.value)
class KerasLayers(six.with_metaclass(ABCMeta, BaseEstimator)):
"""
Parameters
-----------
name: str
layers: list of dict, the configuration of model
"""
def __init__(self, name='sequential_1', layers=[]):
self.name = name
self.layers = layers
@property
def named_layers(self):
rval = []
for idx, lyr in enumerate(self.layers):
named = 'layers_%s_%s' % (str(idx), lyr['class_name'])
rval.append((named, lyr))
return rval
def get_params(self, deep=True):
"""Return parameter names for GridSearch"""
out = super(KerasLayers, self).get_params(deep=False)
if not deep:
return out
out.update(self.named_layers)
for name, lyr in self.named_layers:
out.update(_get_params_from_dict(lyr, name))
return out
def set_params(self, **params):
for key in list(six.iterkeys(params)):
if not key.startswith('layers'):
raise ValueError("Only layer structure parameters are "
"not searchable!")
# 1. replace `layers`
if 'layers' in params:
setattr(self, 'layers', params.pop('layers'))
# 2. replace individual layer
layers = self.layers
named_layers = self.named_layers
names = []
named_layers_dict = {}
if named_layers:
names, _ = zip(*named_layers)
named_layers_dict = dict(named_layers)
for name in list(six.iterkeys(params)):
if '__' not in name:
for i, layer_name in enumerate(names):
if layer_name == name:
new_val = params.pop(name)
if new_val is None:
del layers[i]
else:
layers[i] = new_val
break
setattr(self, 'layers', layers)
# 3. replace other layer parameter
search_params = [SearchParam(k, v) for k, v in six.iteritems(params)]
search_params = sorted(search_params, key=lambda x: x.depth)
for param in search_params:
update = param.to_dict()
try:
_update_dict(named_layers_dict, update)
except KeyError:
raise ValueError("Invalid parameter %s for estimator %s. "
"Check the list of available parameters "
"with `estimator.get_params().keys()`." %
(param.s_param, self))
return self
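    # Example (sketch, layer config keys are hypothetical): individual layer
    # hyperparameters are addressed through the generated layer names, e.g.
    #   kl = KerasLayers(layers=[{'class_name': 'Dense', 'config': {'units': 16}}])
    #   kl.set_params(layers_0_Dense__config__units=32)   # updates the layer in place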
class BaseKerasModel(six.with_metaclass(ABCMeta, BaseEstimator)):
"""
Base class for Galaxy Keras wrapper
Parameters
----------
config : dictionary
From `model.get_config()`
model_type : str
'sequential' or 'functional'
optimizer : str, default 'rmsprop'
One of ['sgd', 'rmsprop', 'adagrad', 'adadelta', 'adam',
'adamax', 'nadam', 'ftrl']. Used in model.compile.
loss : str or None
From Keras `loss`. Used in model.compile.
metrics : list of strings, default []
Used in model.compile.
loss_weights : list or dictionary
Used in model.compile.
run_eagerly : bool, default = False.
If True, this Model's logic will not be wrapped in a `tf.function`.
Recommended to leave this as None unless your Model cannot be run
inside a tf.function. Used in model.compile.
steps_per_execution : int, default = 1.
The number of batches to run during each tf.function call.
Used in model.compile.
learning_rate : None or float
Optimizer parameter, default value changes with `optimizer`.
momentum : None or float
For optimizer `sgd` only, ignored otherwise
nesterov : None or bool
For optimizer `sgd` only, ignored otherwise
epsilon : None or float
Optimizer parameter, default change with `optimizer`
rho : None or float
Optimizer parameter, default change with `optimizer`
centered : bool, default = False
For optimizer 'rmsprop' only, ignored otherwise.
amsgrad : None or bool
for optimizer `adam` only, ignored otherwise
beta_1 : None or float
Optimizer parameter, default change with `optimizer`.
beta_2 : None or float
Optimizer parameter, default change with `optimizer`.
initial_accumulator_value : float
        Must be greater than or equal to zero. Used by `adagrad` and `ftrl`,
        ignored otherwise.
beta : float
For `Ftrl` only.
learning_rate_power : float
        Must be less than or equal to zero. For `Ftrl` only.
l1_regularization_strength : float
Must be greater than or equal to zero. For `Ftrl` only.
l2_regularization_strength : float
Must be greater than or equal to zero. For `Ftrl` only.
l2_shrinkage_regularization_strength : float
Must be greater than or equal to zero. For `Ftrl` only.
epochs : int
fit_param from Keras
batch_size : None or int, default=None
fit_param, if None, will default to 32
callbacks : None or list of dict
fit_param, each dict contains one type of callback configuration.
e.g. {"callback_selection":
{"callback_type": "EarlyStopping",
"monitor": "val_loss"
"baseline": None,
"min_delta": 0.0,
"patience": 10,
"mode": "auto",
"restore_best_weights": False}}
validation_split : float.
The proportion of training data to set aside as validation set.
Must be within [0, 1). Will be ignored if `validation_data` is
set via fit_params.
steps_per_epoch : int, default is None
fit param. The number of train batches per epoch
validation_steps : None or int, default is None
fit params, validation steps. if None, it will be number
of samples divided by batch_size.
verbose : 0, 1 or 2
Verbosity mode. 0 = silent, 1 = progress bar, 2 = one line per
epoch. If > 0, log device placement
seed : None or int, default None
backend random seed
"""
def __init__(self, config, model_type='sequential',
optimizer='rmsprop', loss=None, metrics=[],
loss_weights=None, run_eagerly=None,
steps_per_execution=None, learning_rate=None,
momentum=None, nesterov=None, epsilon=None,
rho=None, centered=None, amsgrad=None,
beta_1=None, beta_2=None, learning_rate_power=None,
initial_accumulator_value=None, beta=None,
l1_regularization_strength=None,
l2_regularization_strength=None,
l2_shrinkage_regularization_strength=None,
epochs=1, batch_size=None, callbacks=None,
validation_split=0.1, steps_per_epoch=None,
validation_steps=None, verbose=1, seed=None,
**fit_params):
self.config = config
self.model_type = model_type
self.optimizer = optimizer
self.loss = loss
self.metrics = metrics
self.loss_weights = loss_weights
self.run_eagerly = run_eagerly
self.steps_per_execution = steps_per_execution
# optimizer parameters
self.learning_rate = learning_rate
self.momentum = momentum
self.epsilon = epsilon
self.centered = centered
self.nesterov = nesterov
self.rho = rho
self.amsgrad = amsgrad
self.beta_1 = beta_1
self.beta_2 = beta_2
self.learning_rate_power = learning_rate_power
self.initial_accumulator_value = initial_accumulator_value
self.beta = beta
self.l1_regularization_strength = l1_regularization_strength
self.l2_regularization_strength = l2_regularization_strength
self.l2_shrinkage_regularization_strength = \
l2_shrinkage_regularization_strength
# fit parameters
self.epochs = epochs
self.batch_size = batch_size or 32
self.callbacks = callbacks
if not (0.0 <= validation_split < 1.0):
raise ValueError("validation_split must be in range [0, 1)")
self.validation_split = validation_split
self.steps_per_epoch = steps_per_epoch
self.validation_steps = validation_steps
self.verbose = verbose
self.seed = seed
self.fit_params = fit_params
check_params(fit_params, Model.fit)
@property
def _optimizer(self):
if self.optimizer == 'sgd':
options = dict(
learning_rate=self.learning_rate or 0.01,
momentum=self.momentum or 0,
nesterov=self.nesterov or False
)
return SGD(**options)
elif self.optimizer == 'rmsprop':
options = dict(
learning_rate=self.learning_rate or 0.001,
rho=self.rho or 0.9,
momentum=self.momentum or 0.,
epsilon=self.epsilon or 1e-07,
centered=self.centered or False
)
return RMSprop(**options)
elif self.optimizer == 'adam':
options = dict(
learning_rate=self.learning_rate or 0.001,
beta_1=self.beta_1 or 0.9,
beta_2=self.beta_2 or 0.999,
epsilon=self.epsilon or 1e-07,
amsgrad=self.amsgrad or False
)
return Adam(**options)
elif self.optimizer == 'adadelta':
options = dict(
learning_rate=self.learning_rate or 0.001,
rho=self.rho or 0.95,
epsilon=self.epsilon or 1e-7,
)
return Adadelta(**options)
elif self.optimizer == 'adagrad':
options = dict(
learning_rate=self.learning_rate or 0.001,
initial_accumulator_value=(
self.initial_accumulator_value or 0.1),
epsilon=self.epsilon or 1e-07
)
return Adagrad(**options)
elif self.optimizer == 'adamax':
options = dict(
learning_rate=self.learning_rate or 0.001,
beta_1=self.beta_1 or 0.9,
beta_2=self.beta_2 or 0.999,
epsilon=self.epsilon or 1e-07
)
return Adamax(**options)
elif self.optimizer == 'nadam':
options = dict(
learning_rate=self.learning_rate or 0.001,
beta_1=self.beta_1 or 0.9,
beta_2=self.beta_2 or 0.999,
epsilon=self.epsilon or 1e-07
)
return Nadam(**options)
elif self.optimizer == 'ftrl':
options = dict(
learning_rate=self.learning_rate or 0.001,
learning_rate_power=self.learning_rate_power or -0.5,
initial_accumulator_value=(
self.initial_accumulator_value or 0.1),
l1_regularization_strength=(
self.l1_regularization_strength or 0),
l2_regularization_strength=(
self.l2_regularization_strength or 0),
l2_shrinkage_regularization_strength=(
self.l2_shrinkage_regularization_strength or 0),
beta=self.beta or 0.
)
return Ftrl(**options)
else:
raise ValueError("Unsupported optimizer type: %s!"
% self.optimizer)
@property
def named_layers(self):
rval = []
for idx, lyr in enumerate(self.config['layers']):
class_name = lyr['class_name']
if class_name in ['Model', 'Sequential']:
raise ValueError("Model layers are not supported yet!")
named = 'layers_%s_%s' % (str(idx), class_name)
rval.append((named, lyr))
return rval
@property
def _callbacks(self):
""" return list of callback objects from parameters.
suppose correct input format.
Notes
-----
For `filepath`, `log_dir`, `filename`,
if None, `os.getcwd()` is used.
"""
if not self.callbacks:
return None
callbacks = []
for cb in copy.deepcopy(self.callbacks):
params = cb['callback_selection']
callback_type = params.pop('callback_type')
curr_dir = Path.cwd()
if callback_type in ('None', ''):
continue
elif callback_type == 'ModelCheckpoint':
if not params.get('filepath', None):
params['filepath'] = curr_dir.joinpath('weights.hdf5')
elif callback_type == 'TensorBoard':
if not params.get('log_dir', None):
params['log_dir'] = curr_dir.joinpath('logs')
elif callback_type == 'CSVLogger':
if not params:
params['filename'] = curr_dir.joinpath('log.csv')
params['separator'] = '\t'
params['append'] = True
klass = getattr(keras.callbacks, callback_type)
obj = klass(**params)
callbacks.append(obj)
if not callbacks:
return None
return callbacks
def _make_validation_split(self, X, y=None, sample_weight=None):
n_samples = X.shape[0]
if y is not None and is_classifier(self):
splitter_type = StratifiedShuffleSplit
else:
splitter_type = ShuffleSplit
# make split randomness fixed.
random_state = check_random_state(self.seed or 0)
cv = splitter_type(test_size=self.validation_split,
random_state=random_state)
idx_train, idx_val = next(cv.split(X, y))
if idx_train.shape[0] == 0 or idx_val.shape[0] == 0:
raise ValueError(
"Splitting %d samples into a train set and a validation set "
"with validation_split=%r led to an empty set (%d and %d "
"samples). Please either change validation_split or "
"increase number of samples"
% (n_samples, self.validation_split, idx_train.shape[0],
idx_val.shape[0]))
train_data, validation_data = (X[idx_train], ), (X[idx_val], )
if y is None:
train_data += (None, )
validation_data += (None, )
else:
train_data += (y[idx_train], )
validation_data += (y[idx_val], )
if sample_weight is None:
train_data += (None, )
else:
train_data += (sample_weight[idx_train], )
return train_data, validation_data
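    # Example (sketch): with validation_split=0.1,
    #   (X_tr, y_tr, sw_tr), (X_val, y_val) = self._make_validation_split(X, y)
    # holds out ~10% of the samples, stratified when the estimator is a classifier.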
def _fit(self, X, y, **kwargs):
# base fit
# context._context = None
# context._create_context()
if self.seed is not None:
np.random.seed(self.seed)
random.seed(self.seed)
tf.random.set_seed(self.seed)
# tf.config.threading.set_intra_op_parallelism_threads(1)
# tf.config.threading.set_inter_op_parallelism_threads(1)
# tf.config.set_soft_device_placement(True)
# tf.debugging.set_log_device_placement(self.verbose > 1)
config = self.config
if self.model_type not in ['sequential', 'functional']:
raise ValueError("Unsupported model type %s" % self.model_type)
if self.model_type == 'sequential':
self.model_class_ = Sequential
else:
self.model_class_ = Model
self.model_ = self.model_class_.from_config(
config,
custom_objects=dict(tf=tf))
self.model_.compile(
optimizer=self._optimizer, loss=self.loss, metrics=self.metrics,
loss_weights=self.loss_weights, run_eagerly=self.run_eagerly,
steps_per_execution=self.steps_per_execution
)
if self.loss == 'categorical_crossentropy' and len(y.shape) != 2:
y = to_categorical(y)
fit_params = self.fit_params
fit_params.update(dict(epochs=self.epochs,
batch_size=self.batch_size,
callbacks=self._callbacks,
validation_split=self.validation_split,
steps_per_epoch=self.steps_per_epoch,
validation_steps=self.validation_steps,
verbose=self.verbose))
fit_params.update(kwargs)
sample_weight = fit_params.get('sample_weight', None)
validation_split = fit_params.get('validation_split', 0.)
validation_data = fit_params.get('validation_data', None)
# customize validation split
if validation_split and not validation_data:
train_data, validation_data = self._make_validation_split(
X, y, sample_weight)
X, y, sample_weight = train_data
fit_params['validation_data'] = validation_data
fit_params['sample_weight'] = sample_weight
fit_params['validation_split'] = 0.
history = self.model_.fit(X, y, **fit_params)
return history
def get_params(self, deep=True):
"""Return parameter names for GridSearch"""
out = super(BaseKerasModel, self).get_params(deep=deep)
if not deep:
return out
out.update(self.named_layers)
for name, lyr in self.named_layers:
out.update(_get_params_from_dict(lyr, name))
return out
def set_params(self, **params):
"""
"""
valid_params = self.get_params(deep=False)
# 1. replace `config`
if 'config' in params:
setattr(self, 'config', params.pop('config'))
# 2. replace individual layer or non-layer top level parameters
named_layers = self.named_layers
layer_names = []
named_layers_dict = {}
if named_layers:
layer_names, _ = zip(*named_layers)
named_layers_dict = dict(named_layers)
for name in list(six.iterkeys(params)):
if '__' not in name:
for i, layer_name in enumerate(layer_names):
# replace layer
if layer_name == name:
new_val = params.pop(name)
if new_val is None:
del self.config['layers'][i]
else:
self.config['layers'][i] = new_val
break
else:
# replace non-layer top level parameter
if name not in valid_params:
raise ValueError(
"Invalid parameter %s for estimator %s. "
"Check the list of available parameters "
"with `estimator.get_params().keys()`."
% (name, self))
setattr(self, name, params.pop(name))
# replace nested non-layer parameters
nested_params = collections.defaultdict(dict) # grouped by prefix
# update params
valid_params = self.get_params(deep=True)
for name in list(six.iterkeys(params)):
if name.startswith('layers'):
continue
key, delim, sub_key = name.partition('__')
if key not in valid_params:
raise ValueError("Invalid parameter %s for estimator %s. "
"Check the list of available parameters "
"with `estimator.get_params().keys()`." %
(name, self))
nested_params[key][sub_key] = params.pop(name)
for key, sub_params in nested_params.items():
valid_params[key].set_params(**sub_params)
# 3. replace layer parameter
search_params = [SearchParam(k, v) for k, v in six.iteritems(params)]
search_params = sorted(search_params, key=lambda x: x.depth)
for param in search_params:
update = param.to_dict()
try:
_update_dict(named_layers_dict, update)
except KeyError:
raise ValueError("Invalid parameter %s for estimator %s. "
"Check the list of available parameters "
"with `estimator.get_params().keys()`." %
(param.s_param, self))
return self
def to_json(self):
if hasattr(self, 'model_'):
# fitted
return self.model_.to_json()
else:
config = self.config
if self.model_type not in ['sequential', 'functional']:
raise ValueError("Unsupported model type %s" % self.model_type)
if self.model_type == 'sequential':
model_class_ = Sequential
else:
model_class_ = Model
model_ = model_class_.from_config(
config,
custom_objects=dict(tf=tf))
return model_.to_json()
def save_weights(self, filepath, overwrite=True):
"""Dumps all layer weights to a HDF5 file.
parameters
----------
filepath : str
path to the file to save the weights to.
overwrite : bool, default is True
Whether to silently overwrite any existing file at the
target location, or provide the user with a manual prompt.
"""
if not hasattr(self, 'model_'):
raise ValueError("Keras model is not fitted. No weights to save!")
self.model_.save_weights(
filepath, overwrite=overwrite, save_format='h5')
def load_weights(self, filepath, by_name=False,
skip_mismatch=False, options=None):
"""Loads all layer weights from a HDF5 save file.
parameters
----------
filepath : str
path to the weights file to load.
by_name: Bool
whether to load weights by name or by topological order.
skip_mismatch : Boolean
whether to skip loading of layers where there is a mismatch
in the number of weights, or a mismatch in the shape of the
weight (only valid when `by_name`=True).
options : Optional tf.train.CheckpointOptions object that specifies
options for loading weights.
"""
config = self.config
if self.model_type not in ['sequential', 'functional']:
raise ValueError("Unsupported model type %s" % self.model_type)
if self.model_type == 'sequential':
self.model_class_ = Sequential
else:
self.model_class_ = Model
self.model_ = self.model_class_.from_config(
config,
custom_objects=dict(tf=tf))
self.model_.load_weights(filepath, by_name=by_name,
skip_mismatch=skip_mismatch,
options=options)
def save_model(self, file_or_group, extra_attrs=None, skip_params=None):
""" Serialize configuration and weights to hdf5. Good for prediction.
Should not be used in continuing training.
Parameters
-----------
file_or_group : str, Path-like or h5py.Group objtect.
extra_attrs : list of strings or None.
Extra attributes to serialize.
skip_params : list of strings or None.
List of parameters that don't need to keep.
"""
if not isinstance(file_or_group, h5py.Group):
if not isinstance(file_or_group, (Path, str)):
raise ValueError("Type of `file_or_group` must be str, Path or"
" Group, but got %s!" % type(file_or_group))
group = h5py.File(file_or_group, 'w')
else:
group = file_or_group
class_name = self.__class__.__name__
params = self.get_params(deep=False)
if not skip_params:
skip_params = []
if not isinstance(skip_params, (list, tuple)):
skip_params = [skip_params]
for p in skip_params:
params.pop(p, None)
group['class_name'] = class_name
group['params'] = json.dumps(params).encode('utf8')
if hasattr(self, 'model_'):
weights = group.create_group('weights')
hdf5_format.save_weights_to_hdf5_group(
weights, self.model_.layers)
if extra_attrs:
if not isinstance(extra_attrs, (list, tuple)):
extra_attrs = [extra_attrs]
attrs = group.create_group('attributes')
for att in extra_attrs:
try:
attrs[att] = getattr(self, att)
except Exception as e:
warnings.warn(e)
continue
if isinstance(file_or_group, (Path, str)):
group.close()
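    # Example (sketch): persist a fitted estimator for later prediction:
    #   est.save_model('model.h5')       # config + params (+ weights if fitted)
    #   est.save_weights('weights.h5')   # weights only
    #   est.load_weights('weights.h5')   # rebuilds model_ from config, then loads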
class KerasGClassifier(BaseKerasModel, ClassifierMixin):
"""
Scikit-learn classifier API for Keras
"""
def fit(self, X, y, class_weight=None, **kwargs):
"""
Parameters:
-----------
X : array-like, shape `(n_samples, feature_arrays)`
"""
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'], allow_nd=True,
multi_output=True)
check_classification_targets(y)
check_params(kwargs, Model.fit)
if len(y.shape) == 2 and y.shape[1] > 1:
self.classes_ = np.arange(y.shape[1])
elif (len(y.shape) == 2 and y.shape[1] == 1) or len(y.shape) == 1:
self.classes_ = np.unique(y)
y = np.searchsorted(self.classes_, y)
else:
raise ValueError('Invalid shape for y: ' + str(y.shape))
self.n_classes_ = len(self.classes_)
kwargs.update({'class_weight': class_weight})
return super(KerasGClassifier, self)._fit(X, y, **kwargs)
def _predict(self, X, **kwargs):
check_is_fitted(self, 'model_')
X = check_array(X, accept_sparse=['csc', 'csr'], allow_nd=True)
check_params(kwargs, Model.predict)
preds = self.model_.predict(X, **kwargs)
return preds
def predict_proba(self, X, **kwargs):
probas = self._predict(X, **kwargs)
if probas.min() < 0. or probas.max() > 1.:
warnings.warn('Network returning invalid probability values. '
'The last layer might not normalize predictions '
'into probabilities '
'(like softmax or sigmoid would).')
if probas.shape[1] == 1:
# first column is probability of class 0 and second is of class 1
probas = np.hstack([1 - probas, probas])
return probas
def predict(self, X, **kwargs):
probas = self._predict(X, **kwargs)
if probas.shape[-1] > 1:
            # if the last activation is `softmax`, the sum of all
            # probabilities will be 1 and the task is treated as
            # multi-class; otherwise, we take it as multi-label.
act = getattr(self.model_.layers[-1], 'activation', None)
if act and act.__name__ == 'softmax':
classes = probas.argmax(axis=-1)
else:
return (probas > 0.5).astype('int32')
else:
classes = (probas > 0.5).astype('int32')
return self.classes_[classes]
def score(self, X, y, **kwargs):
X = check_array(X, accept_sparse=['csc', 'csr'], allow_nd=True)
y = np.searchsorted(self.classes_, y)
check_params(kwargs, Model.evaluate)
if self.loss == 'categorical_crossentropy' and len(y.shape) != 2:
y = to_categorical(y)
outputs = self.model_.evaluate(X, y, **kwargs)
outputs = to_list(outputs)
for name, output in zip(self.model_.metrics_names, outputs):
            if name in ('acc', 'accuracy'):
return output
raise ValueError('The model is not configured to compute accuracy. '
'You should pass `metrics=["accuracy"]` to '
'the `model.compile()` method.')
def save_model(self, file_or_group, extra_attrs=['classes_'],
skip_params=None):
return super().save_model(file_or_group, extra_attrs=extra_attrs,
skip_params=skip_params)
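# Example (sketch, assuming `cfg` is a dict from `keras_model.get_config()` of a
# binary classifier ending in a sigmoid unit):
#   clf = KerasGClassifier(config=cfg, loss='binary_crossentropy',
#                          metrics=['accuracy'], epochs=10, batch_size=32)
#   clf.fit(X_train, y_train)
#   probas = clf.predict_proba(X_test)   # shape (n, 2) for binary problems
#   labels = clf.predict(X_test)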
class KerasGRegressor(BaseKerasModel, RegressorMixin):
"""
Scikit-learn API wrapper for Keras regressor
"""
def fit(self, X, y, **kwargs):
X, y = check_X_y(X, y, accept_sparse=['csc', 'csr'], allow_nd=True)
check_params(kwargs, Model.fit)
return super(KerasGRegressor, self)._fit(X, y, **kwargs)
def predict(self, X, **kwargs):
check_is_fitted(self, 'model_')
X = check_array(X, accept_sparse=['csc', 'csr'], allow_nd=True)
check_params(kwargs, Model.predict)
return np.squeeze(self.model_.predict(X, **kwargs), axis=-1)
def score(self, X, y, **kwargs):
check_is_fitted(self, 'model_')
X = check_array(X, accept_sparse=['csc', 'csr'], allow_nd=True)
check_params(kwargs, Model.evaluate)
loss = self.model_.evaluate(X, y, **kwargs)
if isinstance(loss, list):
return -loss[0]
return -loss
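# Example (sketch, assuming `cfg` describes a single-output regression network):
#   reg = KerasGRegressor(config=cfg, loss='mean_squared_error', epochs=20)
#   reg.fit(X_train, y_train)
#   y_hat = reg.predict(X_test)          # 1-D array (last axis squeezed)
#   neg_mse = reg.score(X_test, y_test)  # negative loss, higher is better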
class KerasGBatchClassifier(KerasGClassifier):
"""
keras classifier with batch data generator
Parameters
----------
config : dictionary
from `model.get_config()`
data_batch_generator : instance of batch data generator
model_type : str
'sequential' or 'functional'
optimizer : str, default 'sgd'
'sgd', 'rmsprop', 'adagrad', 'adadelta', 'adam', 'adamax', 'nadam'
loss : str, default 'binary_crossentropy'
Keras `loss`.
metrics : list of strings, default []
loss_weights : list or dictionary
Used in model.compile.
run_eagerly : bool, default = False.
If True, this Model's logic will not be wrapped in a `tf.function`.
Recommended to leave this as None unless your Model cannot be run
inside a tf.function. Used in model.compile.
steps_per_execution : int, default = 1.
The number of batches to run during each tf.function call.
Used in model.compile.
learning_rate : None or float
Optimizer parameter, default value changes with `optimizer`.
momentum : None or float
For optimizer `sgd` only, ignored otherwise
nesterov : None or bool
For optimizer `sgd` only, ignored otherwise
epsilon : None or float
Optimizer parameter, default change with `optimizer`
rho : None or float
Optimizer parameter, default change with `optimizer`
centered : bool, default = False
For optimizer 'rmsprop' only, ignored otherwise.
amsgrad : None or bool
for optimizer `adam` only, ignored otherwise
beta_1 : None or float
Optimizer parameter, default change with `optimizer`.
beta_2 : None or float
Optimizer parameter, default change with `optimizer`.
initial_accumulator_value : float
        Must be greater than or equal to zero. Used by `adagrad` and `ftrl`,
        ignored otherwise.
beta : float
For `Ftrl` only.
learning_rate_power : float
        Must be less than or equal to zero. For `Ftrl` only.
l1_regularization_strength : float
Must be greater than or equal to zero. For `Ftrl` only.
l2_regularization_strength : float
Must be greater than or equal to zero. For `Ftrl` only.
l2_shrinkage_regularization_strength : float
Must be greater than or equal to zero. For `Ftrl` only.
epochs : int
fit_param from Keras
batch_size : None or int, default=None
fit_param, if None, will default to 32
callbacks : None or list of dict
each dict contains one type of callback configuration.
e.g. {"callback_selection":
{"callback_type": "EarlyStopping",
"monitor": "val_loss"
"baseline": None,
"min_delta": 0.0,
"patience": 10,
"mode": "auto",
"restore_best_weights": False}}
validation_split : Float. default=0.
The proportion of training data to set aside as validation set.
Must be within [0, 1). Will be ignored if `validation_data` is
set via fit_params.
steps_per_epoch : int, default is None
fit param. The number of train batches per epoch
validation_steps : None or int, default is None
fit params, validation steps. if None, it will be number
of samples divided by batch_size.
verbose : 0, 1 or 2
Verbosity mode. 0 = silent, 1 = progress bar, 2 = one line per
epoch. If > 0, log device placement
seed : None or int, default None
Backend random seed
n_jobs : int, default=1
prediction_steps : None or int, default is None
prediction steps. If None, it will be number of samples
divided by batch_size.
class_positive_factor : int or float, default=1
For binary classification only. If int, like 5, will
convert to class_weight {0: 1, 1: 5}.
If float, 0.2, corresponds to class_weight
{0: 1/0.2, 1: 1}
"""
def __init__(self, config, data_batch_generator=None,
model_type='sequential', optimizer='rmsprop',
loss='binary_crossentropy', metrics=[],
loss_weights=None, run_eagerly=None,
steps_per_execution=None, learning_rate=None,
momentum=None, nesterov=None, epsilon=None, rho=None,
centered=None, amsgrad=None, beta_1=None, beta_2=None,
learning_rate_power=None, initial_accumulator_value=None,
beta=None, l1_regularization_strength=None,
l2_regularization_strength=None,
l2_shrinkage_regularization_strength=None,
epochs=1, batch_size=None, callbacks=None,
validation_split=0., steps_per_epoch=None,
validation_steps=None, verbose=1, seed=None,
n_jobs=1, prediction_steps=None,
class_positive_factor=1, **fit_params):
super(KerasGBatchClassifier, self).__init__(
config, model_type=model_type, optimizer=optimizer,
loss=loss, metrics=metrics, loss_weights=loss_weights,
run_eagerly=run_eagerly, steps_per_execution=steps_per_execution,
learning_rate=learning_rate, momentum=momentum,
nesterov=nesterov, epsilon=epsilon, rho=rho,
centered=centered, amsgrad=amsgrad, beta_1=beta_1,
beta_2=beta_2, learning_rate_power=learning_rate_power,
initial_accumulator_value=initial_accumulator_value,
beta=beta, l1_regularization_strength=l1_regularization_strength,
l2_regularization_strength=l2_regularization_strength,
l2_shrinkage_regularization_strength=(
l2_shrinkage_regularization_strength),
epochs=epochs, batch_size=batch_size, callbacks=callbacks,
validation_split=validation_split,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps, verbose=verbose,
seed=seed, **fit_params)
self.data_batch_generator = data_batch_generator
self.n_jobs = n_jobs
self.prediction_steps = prediction_steps
self.class_positive_factor = class_positive_factor
def fit(self, X, y=None, class_weight=None, sample_weight=None, **kwargs):
""" fit the model
"""
if self.seed is not None:
np.random.seed(self.seed)
random.seed(self.seed)
tf.random.set_seed(self.seed)
check_params(kwargs, Model.fit_generator)
self.data_generator_ = clone(self.data_batch_generator)
self.data_generator_.set_processing_attrs()
if y is not None:
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'], allow_nd=True,
multi_output=True)
check_classification_targets(y)
if len(y.shape) == 2 and y.shape[1] > 1:
self.classes_ = np.arange(y.shape[1])
elif (len(y.shape) == 2 and y.shape[1] == 1) or len(y.shape) == 1:
self.classes_ = np.unique(y)
y = np.searchsorted(self.classes_, y)
else:
raise ValueError('Invalid shape for y: ' + str(y.shape))
self.n_classes_ = len(self.classes_)
if self.loss == 'categorical_crossentropy' and len(y.shape) != 2:
y = to_categorical(y)
else:
X = check_array(X, accept_sparse=['csr', 'csc'], allow_nd=True)
if hasattr(self.data_generator_, 'target_path'):
# for GenomicIntervalBatchGenerator
self.classes_ = np.arange(
max(self.data_generator_.n_features_, 2))
self.n_classes_ = len(self.classes_)
if self.classes_.tolist() == [0, 1] and class_weight is None:
if self.class_positive_factor > 1:
class_weight = {0: 1, 1: self.class_positive_factor}
elif self.class_positive_factor < 1.0:
class_weight = {0: 1/self.class_positive_factor, 1: 1}
if class_weight is not None:
kwargs['class_weight'] = class_weight
config = self.config
if self.model_type not in ['sequential', 'functional']:
raise ValueError("Unsupported model type %s" % self.model_type)
if self.model_type == 'sequential':
self.model_class_ = Sequential
else:
self.model_class_ = Model
self.model_ = self.model_class_.from_config(
config,
custom_objects=dict(tf=tf))
# enable multiple gpu mode
try:
self.model_ = multi_gpu_model(self.model_)
except Exception:
pass
self.model_.compile(
optimizer=self._optimizer, loss=self.loss, metrics=self.metrics,
loss_weights=self.loss_weights, run_eagerly=self.run_eagerly,
steps_per_execution=self.steps_per_execution
)
fit_params = self.fit_params
fit_params.update(dict(epochs=self.epochs,
callbacks=self._callbacks,
steps_per_epoch=self.steps_per_epoch,
validation_steps=self.validation_steps,
verbose=self.verbose))
fit_params.update(kwargs)
sample_weight = fit_params.get('sample_weight', None)
validation_data = fit_params.get('validation_data', None)
# customize validation split
if self.validation_split and not validation_data:
train_data, validation_data = self._make_validation_split(
X, y, sample_weight)
X, y, sample_weight = train_data
fit_params['validation_data'] = validation_data
# make validation data generator
if validation_data:
val_steps = fit_params.pop('validation_steps', None)
if val_steps:
val_size = val_steps * self.batch_size
else:
val_size = validation_data[0].shape[0]
fit_params['validation_data'] = \
self.data_generator_.sample(*validation_data,
sample_size=val_size)
history = self.model_.fit_generator(
self.data_generator_.flow(X, y, batch_size=self.batch_size,
sample_weight=sample_weight),
shuffle=self.seed is None,
**fit_params)
return history
def _predict(self, X, **kwargs):
"""
Parameter
---------
        X : 2-D array of indices, or array of another shape
            If X is a 2-D array of shape (n_samples, 1), it is treated as
            sample indices and batches are produced by the data generator.
            Otherwise, predict directly on X using `self.model_`.
        data_generator : obj
            Data generator that transforms indices into arrays.
kwargs : dict
Other predict parameters.
"""
check_is_fitted(self, 'model_')
X = check_array(X, accept_sparse=['csc', 'csr'], allow_nd=True)
pred_data_generator = kwargs.pop('data_generator', None)
check_params(kwargs, Model.predict_generator)
batch_size = kwargs.pop('batch_size', None) or self.batch_size
n_jobs = self.n_jobs
steps = kwargs.pop('steps', None)
if not steps:
steps = self.prediction_steps
# make predict data generator
if X.ndim == 2 and X.shape[1] == 1:
if not pred_data_generator:
pred_data_generator = getattr(self, 'data_generator_', None)
if not pred_data_generator:
if hasattr(self, 'data_batch_generator'):
pred_data_generator = clone(self.data_batch_generator)
pred_data_generator.set_processing_attrs()
else:
raise ValueError("Prediction asks for a data_generator, "
"but none is provided!")
preds = self.model_.predict_generator(
pred_data_generator.flow(X, batch_size=batch_size),
steps=steps,
workers=n_jobs,
use_multiprocessing=False,
**kwargs)
# X was transformed
else:
preds = self.model_.predict(X, batch_size=batch_size,
**kwargs)
if preds.min() < 0. or preds.max() > 1.:
warnings.warn('Network returning invalid probability values. '
'The last layer might not normalize predictions '
'into probabilities '
'(like softmax or sigmoid would).')
return preds
def score(self, X, y=None, **kwargs):
"""
Return evaluation scores based on metrics passed through compile
parameters.
Only support batch compatible parameters, like acc.
"""
X = check_array(X, accept_sparse=['csc', 'csr'], allow_nd=True)
if y is not None:
y = np.searchsorted(self.classes_, y)
if self.loss == 'categorical_crossentropy' and len(y.shape) != 2:
y = to_categorical(y)
data_generator = kwargs.pop('data_generator', None)
if not data_generator:
data_generator_ = self.data_generator_
check_params(kwargs, Model.predict_generator)
check_params(kwargs, Model.evaluate_generator)
n_jobs = self.n_jobs
batch_size = self.batch_size or 32
steps = kwargs.pop('steps', None)
if not steps:
steps = self.prediction_steps
outputs = self.model_.evaluate_generator(
data_generator_.flow(X, y=y, batch_size=batch_size),
steps=steps,
            workers=n_jobs,
use_multiprocessing=False,
**kwargs)
outputs = to_list(outputs)
for name, output in zip(self.model_.metrics_names, outputs):
            if name in ('acc', 'accuracy'):
return output
raise ValueError('The model is not configured to compute accuracy. '
'You should pass `metrics=["accuracy"]` to '
'the `model.compile()` method.')
def evaluate(self, X_test, y_test=None, scorers=None, error_score='raise',
steps=None, batch_size=None):
"""Compute the score(s) with sklearn scorers on a given test
set. Will return a dict of floats if scorer is a dict, otherwise a
single float is returned.
"""
if not steps:
steps = self.prediction_steps
if not batch_size:
batch_size = self.batch_size
generator = self.data_generator_.flow(X_test, y=y_test,
batch_size=batch_size)
pred_probas, y_true = _predict_generator(self.model_, generator,
steps=steps)
t_type = type_of_target(y_true)
# TODO: multi-class metrics
if t_type not in ('binary', 'multilabel-indicator'):
raise ValueError("Scorer for multi-class classification is not "
"yet implemented!")
        # binary vs. multi-label handling
if t_type == 'binary':
pred_probas = pred_probas.ravel()
pred_labels = (pred_probas > 0.5).astype('int32')
targets = y_true.ravel().astype('int32')
else:
pred_labels = (pred_probas > 0.5).astype('int32')
targets = y_true.astype('int32')
if not isinstance(scorers, dict):
try:
preds = pred_labels if scorers.__class__.__name__ == \
'_PredictScorer' else pred_probas
score_func = scorers._score_func \
if t_type == 'binary' \
else compute_score
score = score_func(targets, preds, **scorers._kwargs)
except Exception:
if error_score == 'raise':
raise
else:
score = error_score
return score
else:
scores = {}
try:
for name, scorer in scorers.items():
preds = pred_labels if scorer.__class__.__name__\
== '_PredictScorer' else pred_probas
score_func = scorer._score_func \
if t_type == 'binary' \
else compute_score
score = score_func(targets, preds, **scorer._kwargs)
scores[name] = score
except Exception:
if error_score == 'raise':
raise
else:
scores = {name: error_score for name in scorers}
return scores
def save_model(self, file_or_group, extra_attrs=['classes_'],
skip_params=['data_batch_generator']):
return super().save_model(file_or_group, extra_attrs=extra_attrs,
skip_params=skip_params)
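# Example (sketch, assuming `gen` is a compatible batch data generator and `cfg`
# a Keras config dict):
#   clf = KerasGBatchClassifier(config=cfg, data_batch_generator=gen,
#                               loss='binary_crossentropy', metrics=['acc'],
#                               epochs=5, batch_size=64, prediction_steps=100)
#   clf.fit(X_idx, y)            # X_idx may be an (n, 1) array of sample indices
#   probas = clf.predict_proba(X_idx)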
def _predict_generator(model, generator, steps=None,
max_queue_size=10, workers=1,
use_multiprocessing=False,
verbose=0):
"""Override keras predict_generator to output true labels together
with prediction results
"""
# TODO: support prediction callbacks
model.make_predict_function()
steps_done = 0
all_preds = []
all_y = []
use_sequence_api = isinstance(generator, Sequence)
if not use_sequence_api and use_multiprocessing and workers > 1:
warnings.warn(
UserWarning('Using a generator with `use_multiprocessing=True`'
' and multiple workers may duplicate your data.'
' Please consider using the `keras.utils.Sequence'
' class.'))
if steps is None:
if use_sequence_api:
steps = len(generator)
else:
raise ValueError('`steps=None` is only valid for a generator'
' based on the `keras.utils.Sequence` class.'
' Please specify `steps` or use the'
' `keras.utils.Sequence` class.')
enqueuer = None
try:
if workers > 0:
if use_sequence_api:
enqueuer = OrderedEnqueuer(
generator,
use_multiprocessing=use_multiprocessing)
else:
enqueuer = GeneratorEnqueuer(
generator,
use_multiprocessing=use_multiprocessing)
enqueuer.start(workers=workers, max_queue_size=max_queue_size)
output_generator = enqueuer.get()
else:
if use_sequence_api:
output_generator = iter_sequence_infinite(generator)
else:
output_generator = generator
while steps_done < steps:
generator_output = next(output_generator)
if isinstance(generator_output, tuple):
# Compatibility with the generators
# used for training.
if len(generator_output) == 2:
x, y = generator_output
elif len(generator_output) == 3:
x, y, _ = generator_output
else:
raise ValueError('Output of generator should be '
'a tuple `(x, y, sample_weight)` '
'or `(x, y)`. Found: ' +
str(generator_output))
else:
# Assumes a generator that only
# yields inputs (not targets and sample weights).
x = generator_output
outs = model.predict_on_batch(x)
outs = to_list(outs)
if not all_preds:
for out in outs:
all_preds.append([])
all_y.append([])
for i, out in enumerate(outs):
all_preds[i].append(out)
all_y[i].append(y)
steps_done += 1
finally:
if enqueuer is not None:
enqueuer.stop()
if len(all_preds) == 1:
if steps_done == 1:
return all_preds[0][0], all_y[0][0]
else:
return np.concatenate(all_preds[0]), np.concatenate(all_y[0])
if steps_done == 1:
return [out[0] for out in all_preds], [label[0] for label in all_y]
else:
        return ([np.concatenate(out) for out in all_preds],
                [np.concatenate(label) for label in all_y])
import gzip
import numpy as np
import math
import time
import pandas as pd
start = time.time()
total_class = 10
cols = 28
rows = 28
total_px = cols * rows
dt = np.dtype(np.uint8).newbyteorder(">")
def loadMNIST(data_file, label_file):
f_ti = open(data_file, "rb")
f_tl = open(label_file, "rb")
_ = f_ti.read(4) # magic_number(4 bytes)
img_num = int.from_bytes(f_ti.read(4), "big")
rows = int.from_bytes(f_ti.read(4), "big")
cols = int.from_bytes(f_ti.read(4), "big")
_ = f_tl.read(8) # magic_number(4 bytes), item number(4 bytes)
img_pixels = np.zeros((img_num, rows * cols), dtype=int)
img_label = np.zeros(img_num, dtype=int)
for n in range(img_num):
pixels = f_ti.read(rows * cols)
img_pixels[n] = np.frombuffer(pixels, dtype=dt)
img_label[n] = int.from_bytes(f_tl.read(1), "big")
f_ti.close()
f_tl.close()
return img_pixels, img_label
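# Example (sketch; the idx file names below are assumptions):
#   train_x, train_y = loadMNIST("train-images-idx3-ubyte", "train-labels-idx1-ubyte")
#   train_x.shape   # (60000, 784), pixel values in [0, 255]
#   train_y.shape   # (60000,)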
def printNumber(print_str, guess, labels=np.arange(10)):
for c in range(total_class):
print(print_str + " {}:".format(c))
        for px_idx in range(total_px):
            print("{:d}".format(guess[labels[c]][px_idx] >= 0.5), end=" ")
            if px_idx % cols == cols - 1:
                print()
print()
def printResult(train_x, train_y, _lambda, px_prob, iterations):
# (ground truth labels, highest possibility label)
# count the number of ground truth c, with highest possibility class p
count_cls = np.zeros((total_class, total_class))
pred_y = np.zeros(train_x.shape[0], dtype=int)
px_prob_comp = 1.0 - px_prob
train_x_comp = 1 - train_x
for n in range(train_x.shape[0]):
# compute the prediction (highest probability)
prob_cls = _lambda.copy()
for k in range(total_class):
prob_cls[k] = prob_cls[k] * (
np.prod(
np.multiply(px_prob[k, :], train_x[n, :])
+ np.multiply(px_prob_comp[k, :], train_x_comp[n, :])
)
)
pred_y[n] = np.argmax(prob_cls)
count_cls[train_y[n]][pred_y[n]] += 1
# find the corresponding number
table = count_cls.copy()
labels = np.full(total_class, -1, dtype=int)
for k in range(total_class):
max_idx = np.argmax(count_cls, axis=None)
(lb_num, lb) = np.unravel_index(max_idx, count_cls.shape)
labels[lb_num] = lb
count_cls[lb_num, :] = -1
count_cls[:, lb] = -1 # set impossible value
printNumber("labeled class", px_prob, labels)
# plot confusion matrix
    cmx = np.zeros((total_class, 2, 2), dtype=int)
import numpy as np
import pytest
from dnnv.nn.graph import OperationGraph
from dnnv.nn import operations
from dnnv.properties.expressions import Network
from dnnv.verifiers.common.reductions.iopolytope import *
from dnnv.verifiers.common.reductions.iopolytope import Variable
def setup_function():
Variable._count = 0
def test_init_merge():
input_op_0 = operations.Input((1, 5), np.dtype(np.float32))
add_op_0 = operations.Add(input_op_0, 1)
op_graph_0 = OperationGraph([add_op_0])
N0 = Network("N0").concretize(op_graph_0)
input_op_1 = operations.Input((1, 5), np.dtype(np.float32))
sub_op_1 = operations.Sub(input_op_1, 1)
op_graph_1 = OperationGraph([sub_op_1])
N1 = Network("N1").concretize(op_graph_1)
input_constraint = HalfspacePolytope()
output_constraint = HalfspacePolytope()
prop = IOPolytopeProperty([N0, N1], input_constraint, output_constraint)
assert len(prop.op_graph.output_operations) == 2
assert isinstance(prop.op_graph.output_operations[0], operations.Add)
assert isinstance(prop.op_graph.output_operations[1], operations.Sub)
assert len(prop.op_graph.input_details) == 1
def test_str():
input_op = operations.Input((1, 5), np.dtype(np.float32))
add_op = operations.Add(input_op, 1)
op_graph = OperationGraph([add_op])
N = Network("N").concretize(op_graph)
vi = Variable((1, 5))
input_constraint = HalfspacePolytope(vi)
input_constraint.update_constraint([vi], np.array([(0, 1)]), np.array([1.0]), 5.0)
vo = Variable((1, 5))
output_constraint = HalfspacePolytope(vo)
output_constraint.update_constraint([vo], np.array([(0, 0)]), np.array([2.0]), 12.0)
prop = IOPolytopeProperty([N], input_constraint, output_constraint)
assert str(prop) == (
"Property:\n"
" Networks:\n"
" [Network('N')]\n"
" Input Constraint:\n"
" 1.0 * x_0[(0, 1)] <= 5.0\n"
" Output Constraint:\n"
" 2.0 * x_1[(0, 0)] <= 12.0"
)
def test_validate_counter_example_true():
input_op = operations.Input((1, 2), np.dtype(np.float32))
matmul_op = operations.MatMul(input_op, np.array([[1.0], [1.0]], dtype=np.float32))
add_op = operations.Add(matmul_op, 1)
op_graph = OperationGraph([add_op])
N = Network("N").concretize(op_graph)
vi = Variable((1, 2))
input_constraint = HalfspacePolytope(vi)
variables = [vi, vi]
indices = np.array([(0, 0), (0, 1)])
coeffs = np.array([1.0, 1.0])
b = np.array(2)
input_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([-1.0, -1.0])
    b = np.array(2)
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day, prev_market_trade_day
from qteasy.utilfuncs import next_market_trade_day
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator
from qteasy.history import stack_dataframes
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.database import DataSource
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = np.array([10000, 20000, 10000])
self.op = np.array([0, 1, -0.33333333])
self.prices = np.array([10, 20, 10])
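# The op values appear to encode trade signals as proportions (inferred from the
# expected results asserted in the tests below, not from qteasy documentation):
# 0 = no action, 1 = buy with the full available cash amount, and a negative
# value = sell that fraction of the current holding, so -0.33333333 sells about
# one third of the 10000 shares held in the third asset.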
self.r = qt.Cost()
def test_rate_creation(self):
print('testing rates objects\n')
self.assertIsInstance(self.r, qt.Cost, 'Type should be Rate')
def test_rate_operations(self):
self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong')
self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect')
self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect')
self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect')
self.assertEqual(np.allclose(self.r(self.amounts), [0.003, 0.003, 0.003]), True, 'fee calculation wrong')
def test_rate_fee(self):
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
def test_min_fee(self):
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
def test_rate_with_min(self):
"""Test transaction cost calculated by rate with min_fee"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
def test_fixed_fee(self):
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
f'result incorrect, {test_fixed_fee_result[0]} does not equal to [0, 0, -3300]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
def test_slipage(self):
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-9 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-9 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
print('\nselling result with fixed rate = 0.001 and slipage = 1E-9:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
f'{test_fixed_fee_result[0]} does not equal to [0, 0, -3333.3333]')
self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
msg=f'{test_fixed_fee_result[1]} does not equal to 33298.88855591.')
self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
msg=f'{test_fixed_fee_result[2]} does not equal to 34.44444409.')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
# first group of inputs, output Space with two discr axis from [0,10]
print('testing space objects\n')
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
def test_extract(self):
"""
:return:
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
self.assertIsInstance(extracted_int_list2[0][1], (int, int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], (int, int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
# materialise the zip so it can be iterated more than once, and compare against
# the enum values actually defined in pars_list
it = list(zip(extracted_int_list4, [(0, 10), (1, 'c'), ('a', 'b'), (1, 14)]))
for item, item2 in it:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
for item, item2 in extracted_int_list5:
print(item, item2)
self.assertTrue(all([item in ((0, 10), (1, 'c'), ('a', 'b'), (1, 14)) and item2 in (1, 2, 3, 4)
for item, item2 in extracted_int_list5]))
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
# All points have been extracted; build subspaces around ten of them and check
# that each subspace is a Space, lies within s, and that extracting with an
# interval of 32 produces the expected number of points
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
points = list(ext)
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
f'\n{points[:5]}')
def test_axis_extract(self):
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
def test_from_point(self):
"""测试从一个点生成一个space"""
# 生成一个space,指定space中的一个点以及distance,生成一个sub-space
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
subspace = s.from_point(p, distance)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
class TestCashPlan(unittest.TestCase):
def setUp(self):
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
# test assertion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
self.assertAlmostEqual(self.cp1.closing_value, 34200)
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
def test_operation(self):
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31'),
Timestamp('2031-12-29'),
Timestamp('2032-12-29'),
Timestamp('2033-12-29'),
Timestamp('2034-12-29'),
Timestamp('2035-12-29'),
Timestamp('2036-12-29'),
Timestamp('2037-12-29'),
Timestamp('2038-12-29'),
Timestamp('2039-12-29'),
Timestamp('2040-12-29'),
Timestamp('2041-12-29'),
Timestamp('2042-12-29')])
self.assertEqual(cp_mul_float2.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31')])
self.assertEqual(cp_mul_float2.amounts, [20000.0,
22000.0,
24000.0,
26000.0,
28000.0,
30000.0,
32000.0,
34000.0,
36000.0,
38000.0,
40000.0,
42000.0])
class TestPool(unittest.TestCase):
def setUp(self):
self.p = ResultPool(5)
self.items = ['first', 'second', (1, 2, 3), 'this', 24]
self.perfs = [1, 2, 3, 4, 5]
self.additional_result1 = ('abc', 12)
self.additional_result2 = ([1, 2], -1)
self.additional_result3 = (12, 5)
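# ResultPool(5) appears to be a capacity-limited pool of (item, perf) pairs:
# in_pool() appends, and cut() trims the pool back to capacity, keeping the
# largest perf values by default or the smallest when keep_largest=False.
# This description is inferred from the assertions in test_operation() below.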
def test_create(self):
self.assertIsInstance(self.p, ResultPool)
def test_operation(self):
self.p.in_pool(self.additional_result1[0], self.additional_result1[1])
self.p.cut()
self.assertEqual(self.p.item_count, 1)
self.assertEqual(self.p.items, ['abc'])
for item, perf in zip(self.items, self.perfs):
self.p.in_pool(item, perf)
self.assertEqual(self.p.item_count, 6)
self.assertEqual(self.p.items, ['abc', 'first', 'second', (1, 2, 3), 'this', 24])
self.p.cut()
self.assertEqual(self.p.items, ['second', (1, 2, 3), 'this', 24, 'abc'])
self.assertEqual(self.p.perfs, [2, 3, 4, 5, 12])
self.p.in_pool(self.additional_result2[0], self.additional_result2[1])
self.p.in_pool(self.additional_result3[0], self.additional_result3[1])
self.assertEqual(self.p.item_count, 7)
self.p.cut(keep_largest=False)
self.assertEqual(self.p.items, [[1, 2], 'second', (1, 2, 3), 'this', 24])
self.assertEqual(self.p.perfs, [-1, 2, 3, 4, 5])
class TestCoreSubFuncs(unittest.TestCase):
"""Test all functions in core.py"""
def setUp(self):
pass
def test_input_to_list(self):
print('Testing input_to_list() function')
input_str = 'first'
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 3), ['first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 4), ['first', 'first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 2, None), ['first', 'first'])
input_list = ['first', 'second']
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 3), ['first', 'second', None])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 4, 'padder'), ['first', 'second', 'padder', 'padder'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 1), ['first', 'second'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, -5), ['first', 'second'])
def test_point_in_space(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
p2 = (-1, 3, 10)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
self.assertFalse(p2 in sp)
print(f'point {p2} is not in space {sp}')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)], 'conti, conti, enum')
p1 = (5.5, 3.2, 8)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
def test_space_in_space(self):
print('test if a space is in another space')
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
sp2 = Space([(0., 10.), (0., 10.), (0., 10.)])
self.assertTrue(sp2 in sp)
self.assertTrue(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is in space {sp2}\n'
f'they are equal to each other\n')
sp2 = Space([(0, 5.), (2, 7.), (3., 9.)])
self.assertTrue(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'{sp2} is a sub space of {sp}\n')
sp2 = Space([(0, 5), (2, 7), (3., 9)])
self.assertFalse(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)])
self.assertFalse(sp in sp2)
self.assertFalse(sp2 in sp)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
def test_space_around_centre(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
ssp = space_around_centre(space=sp, centre=p1, radius=1.2)
print(ssp.boes)
print('\ntest multiple radii:')
self.assertEqual(ssp.boes, [(4.3, 6.7), (2.0, 4.4), (5.8, 8.2)])
ssp = space_around_centre(space=sp, centre=p1, radius=[1, 2, 1])
print(ssp.boes)
self.assertEqual(ssp.boes, [(4.5, 6.5), (1.2000000000000002, 5.2), (6.0, 8.0)])
print('\ntest points on edge:')
p2 = (5.5, 3.2, 10)
ssp = space_around_centre(space=sp, centre=p1, radius=3.9)
print(ssp.boes)
self.assertEqual(ssp.boes, [(1.6, 9.4), (0.0, 7.1), (3.1, 10.0)])
print('\ntest enum spaces')
sp = Space([(0, 100), range(40, 3, -2)], 'discr, enum')
p1 = [34, 12]
ssp = space_around_centre(space=sp, centre=p1, radius=5, ignore_enums=False)
self.assertEqual(ssp.boes, [(29, 39), (22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(ssp.boes)
print('\ntest enum space and ignore enum axis')
ssp = space_around_centre(space=sp, centre=p1, radius=5)
self.assertEqual(ssp.boes, [(29, 39),
(40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(sp.boes)
def test_time_string_format(self):
print('Testing qt.time_string_format() function:')
t = 3.14
self.assertEqual(time_str_format(t), '3s 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '3s ')
self.assertEqual(time_str_format(t, short_form=True), '3"140')
self.assertEqual(time_str_format(t, estimation=True, short_form=True), '3"')
t = 300.14
self.assertEqual(time_str_format(t), '5min 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '5min ')
self.assertEqual(time_str_format(t, short_form=True), "5'140")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "5'")
t = 7435.0014
self.assertEqual(time_str_format(t), '2hrs 3min 55s 1.4ms')
self.assertEqual(time_str_format(t, estimation=True), '2hrs ')
self.assertEqual(time_str_format(t, short_form=True), "2H3'55\"001")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "2H")
t = 88425.0509
self.assertEqual(time_str_format(t), '1days 33min 45s 50.9ms')
self.assertEqual(time_str_format(t, estimation=True), '1days ')
self.assertEqual(time_str_format(t, short_form=True), "1D33'45\"051")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "1D")
def test_get_stock_pool(self):
print(f'start test building stock pool function\n')
share_basics = stock_basic(fields='ts_code,symbol,name,area,industry,market,list_date,exchange')
print(f'\nselect all stocks by area')
stock_pool = qt.get_stock_pool(area='上海')
print(f'{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "上海"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].eq('上海').all())
print(f'\nselect all stocks by multiple areas')
stock_pool = qt.get_stock_pool(area='贵州,北京,天津')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are in list of ["贵州", "北京", "天津"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['贵州',
'北京',
'天津']).all())
print(f'\nselect all stocks by area and industry')
stock_pool = qt.get_stock_pool(area='四川', industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "四川", and industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['四川']).all())
print(f'\nselect all stocks by industry')
stock_pool = qt.get_stock_pool(industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stocks industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
print(f'\nselect all stocks by market')
stock_pool = qt.get_stock_pool(market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
print(f'\nselect all stocks by market and list date')
stock_pool = qt.get_stock_pool(date='2000-01-01', market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板", and list date after "2000-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('2000-01-01').all())
print(f'\nselect all stocks by list date')
stock_pool = qt.get_stock_pool(date='1997-01-01')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all list dates are on or before "1997-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1997-01-01').all())
print(f'\nselect all stocks by exchange')
stock_pool = qt.get_stock_pool(exchange='SSE')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['exchange'].eq('SSE').all())
print(f'\nselect all stocks by industry, area and list date')
industry_list = ['银行', '全国地产', '互联网', '环境保护', '区域地产',
'酒店餐饮', '运输设备', '综合类', '建筑工程', '玻璃',
'家用电器', '文教休闲', '其他商业', '元器件', 'IT设备',
'其他建材', '汽车服务', '火力发电', '医药商业', '汽车配件',
'广告包装', '轻工机械', '新型电力', '多元金融', '饲料']
area_list = ['深圳', '北京', '吉林', '江苏', '辽宁', '广东',
'安徽', '四川', '浙江', '湖南', '河北', '新疆',
'山东', '河南', '山西', '江西', '青海', '湖北',
'内蒙', '海南', '重庆', '陕西', '福建', '广西',
'上海']
stock_pool = qt.get_stock_pool(date='19980101',
industry=industry_list,
area=area_list)
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all list dates are on or before "19980101" and industry and area are within the given lists\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1998-01-01').all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(industry_list).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(area_list).all())
self.assertRaises(KeyError, qt.get_stock_pool, industry=25)
self.assertRaises(KeyError, qt.get_stock_pool, share_name='000300.SH')
self.assertRaises(KeyError, qt.get_stock_pool, markets='SSE')
class TestEvaluations(unittest.TestCase):
"""Test all evaluation functions in core.py"""
# the manually calculated reference results used below are kept in an Excel file
def setUp(self):
"""用np.random生成测试用数据,使用cumsum()模拟股票走势"""
self.test_data1 = pd.DataFrame([5.34892759, 5.65768696, 5.79227076, 5.56266871, 5.88189632,
6.24795001, 5.92755558, 6.38748165, 6.31331899, 5.86001665,
5.61048472, 5.30696736, 5.40406792, 5.03180571, 5.37886353,
5.78608307, 6.26540339, 6.59348026, 6.90943801, 6.70911677,
6.33015954, 6.06697417, 5.9752499, 6.45786408, 6.95273763,
6.7691991, 6.70355481, 6.28048969, 6.61344541, 6.24620003,
6.47409983, 6.4522311, 6.8773094, 6.99727832, 6.59262674,
6.59014938, 6.63758237, 6.38331869, 6.09902105, 6.35390109,
6.51993567, 6.87244592, 6.83963485, 7.08797815, 6.88003144,
6.83657323, 6.97819483, 7.01600276, 7.12554256, 7.58941523,
7.61014457, 7.21224091, 7.48174399, 7.66490854, 7.51371968,
7.11586198, 6.97147399, 6.67453301, 6.2042138, 6.33967015,
6.22187938, 5.98426993, 6.37096079, 6.55897161, 6.26422645,
6.69363762, 7.12668015, 6.83232926, 7.30524081, 7.4262041,
7.54031383, 7.17545919, 7.20659257, 7.44886016, 7.37094393,
6.88011022, 7.08142491, 6.74992833, 6.5967097, 6.21336693,
6.35565105, 6.82347596, 6.44773408, 6.84538053, 6.47966466,
6.09699528, 5.63927014, 6.01081024, 6.20585303, 6.60528206,
7.01594726, 7.03684251, 6.76574977, 7.08740846, 6.65336462,
7.07126686, 6.80058956, 6.79241977, 6.47843472, 6.39245474],
columns=['value'])
self.test_data2 = pd.DataFrame([5.09276527, 4.83828592, 4.6000911, 4.63170487, 4.63566451,
4.50546921, 4.96390044, 4.64557907, 4.25787855, 3.76585551,
3.38826334, 3.76243422, 4.06365426, 3.87084726, 3.91400935,
4.13438822, 4.27064542, 4.56776104, 5.03800296, 5.31070529,
5.39902276, 5.21186286, 5.05683114, 4.68842046, 5.11895168,
5.27151571, 5.72294993, 6.09961056, 6.26569635, 6.48806151,
6.16058885, 6.2582459, 6.38934791, 6.57831057, 6.19508831,
5.70155153, 5.20435735, 5.36538825, 5.40450056, 5.2227697,
5.37828693, 5.53058991, 6.02996797, 5.76802181, 5.66166713,
6.07988994, 5.61794367, 5.63218151, 6.10728013, 6.0324168,
6.27164431, 6.27551239, 6.52329665, 7.00470007, 7.34163113,
7.33699083, 7.67661334, 8.09395749, 7.68086668, 7.58341161,
7.46219819, 7.58671899, 7.19348298, 7.40088323, 7.47562005,
7.93342043, 8.2286081, 8.3521632, 8.43590025, 8.34977395,
8.57563095, 8.81586328, 9.08738649, 9.01542031, 8.8653815,
9.21763111, 9.04233017, 8.59533999, 8.47590075, 8.70857222,
8.78890756, 8.92697606, 9.35743773, 9.68280866, 10.15622021,
10.55908549, 10.6337894, 10.55197128, 10.65435176, 10.54611045,
10.19432562, 10.48320884, 10.36176768, 10.03186854, 10.23656092,
10.0062843, 10.13669686, 10.30758958, 9.87904176, 10.05126375],
columns=['value'])
self.test_data3 = pd.DataFrame([5.02851874, 5.20700348, 5.02410709, 5.49836387, 5.06834371,
5.10956737, 5.15314979, 5.02256472, 5.09746382, 5.23909247,
4.93410336, 4.96316186, 5.40026682, 5.7353255, 5.53438319,
5.79092139, 5.67528173, 5.89840855, 5.75379463, 6.10855386,
5.77322365, 5.84538021, 5.6103973, 5.7518655, 5.49729695,
5.13610628, 5.30524121, 5.68093462, 5.73251319, 6.04420783,
6.26929843, 6.59610234, 6.09872345, 6.25475121, 6.72927396,
6.91395783, 7.00693283, 7.36217783, 7.71516676, 7.67580263,
7.62477511, 7.73600568, 7.53457914, 7.46170277, 7.83658014,
8.11481319, 8.03705544, 7.64948845, 7.52043731, 7.67247943,
7.46511982, 7.43541798, 7.58856517, 7.9392717, 8.25406287,
7.77031632, 8.03223447, 7.86799055, 7.57630999, 7.33230519,
7.22378732, 6.85972264, 7.17548456, 7.5387846, 7.2392632,
6.8455644, 6.59557185, 6.6496796, 6.73685623, 7.18598015,
7.13619128, 6.88060157, 7.1399681, 7.30308077, 6.94942434,
7.0247815, 7.37567798, 7.50080197, 7.59719284, 7.14520561,
7.29913484, 7.79551341, 8.15497781, 8.40456095, 8.86516528,
8.53042688, 8.94268762, 8.52048006, 8.80036284, 8.91602364,
9.19953385, 8.70828953, 8.24613093, 8.18770453, 7.79548389,
7.68627967, 7.23205036, 6.98302636, 7.06515819, 6.95068113],
columns=['value'])
self.test_data4 = pd.DataFrame([4.97926539, 5.44016005, 5.45122915, 5.74485615, 5.45600553,
5.44858945, 5.2435413, 5.47315161, 5.58464303, 5.36179749,
5.38236326, 5.29614981, 5.76523508, 5.75102892, 6.15316618,
6.03852528, 6.01442228, 5.70510182, 5.22748133, 5.46762379,
5.78926267, 5.8221362, 5.61236849, 5.30615725, 5.24200611,
5.41042642, 5.59940342, 5.28306781, 4.99451932, 5.08799266,
5.38865647, 5.58229139, 5.33492845, 5.48206276, 5.09721379,
5.39190493, 5.29965087, 5.0374415, 5.50798022, 5.43107577,
5.22759507, 4.991809, 5.43153084, 5.39966868, 5.59916352,
5.66412137, 6.00611838, 5.63564902, 5.66723484, 5.29863863,
4.91115153, 5.3749929, 5.75082334, 6.08308148, 6.58091182,
6.77848803, 7.19588758, 7.64862286, 7.99818347, 7.91824794,
8.30341071, 8.45984973, 7.98700002, 8.18924931, 8.60755649,
8.66233396, 8.91018407, 9.0782739, 9.33515448, 8.95870245,
8.98426422, 8.50340317, 8.64916085, 8.93592407, 8.63145745,
8.65322862, 8.39543204, 8.37969997, 8.23394504, 8.04062872,
7.91259763, 7.57252171, 7.72670114, 7.74486117, 8.06908188,
7.99166889, 7.92155906, 8.39956136, 8.80181323, 8.47464091,
8.06557064, 7.87145573, 8.0237959, 8.39481998, 8.68525692,
8.81185461, 8.98632237, 9.0989835, 8.89787405, 8.86508591],
columns=['value'])
self.test_data5 = pd.DataFrame([4.50258923, 4.35142568, 4.07459514, 3.87791297, 3.73715985,
3.98455684, 4.07587908, 4.00042472, 4.28276612, 4.01362051,
4.13713565, 4.49312372, 4.48633159, 4.4641207, 4.13444605,
3.79107217, 4.22941629, 4.56548511, 4.92472163, 5.27723158,
5.67409193, 6.00176917, 5.88889928, 5.55256103, 5.39308314,
5.2610492, 5.30738908, 5.22222408, 4.90332238, 4.57499908,
4.96097146, 4.81531011, 4.39115442, 4.63200662, 5.04588813,
4.67866025, 5.01705123, 4.83562258, 4.60381702, 4.66187576,
4.41292828, 4.86604507, 4.42280124, 4.07517294, 4.16317319,
4.10316596, 4.42913598, 4.06609666, 3.96725913, 4.15965746,
4.12379564, 4.04054068, 3.84342851, 3.45902867, 3.17649855,
3.09773586, 3.5502119, 3.66396995, 3.66306483, 3.29131401,
2.79558533, 2.88319542, 3.03671098, 3.44645857, 3.88167161,
3.57961874, 3.60180276, 3.96702102, 4.05429995, 4.40056979,
4.05653231, 3.59600456, 3.60792477, 4.09989922, 3.73503663,
4.01892626, 3.94597242, 3.81466605, 3.71417992, 3.93767156,
4.42806557, 4.06988106, 4.03713636, 4.34408673, 4.79810156,
5.18115011, 4.89798406, 5.3960077, 5.72504875, 5.61894017,
5.1958197, 4.85275896, 5.17550207, 4.71548987, 4.62408567,
4.55488535, 4.36532649, 4.26031979, 4.25225607, 4.58627048],
columns=['value'])
self.test_data6 = pd.DataFrame([5.08639513, 5.05761083, 4.76160923, 4.62166504, 4.62923183,
4.25070173, 4.13447513, 3.90890013, 3.76687608, 3.43342482,
3.67648224, 3.6274775, 3.9385404, 4.39771627, 4.03199346,
3.93265288, 3.50059789, 3.3851961, 3.29743973, 3.2544872,
2.93692949, 2.70893003, 2.55461976, 2.20922332, 2.29054475,
2.2144714, 2.03726827, 2.39007617, 2.29866155, 2.40607111,
2.40440444, 2.79374649, 2.66541922, 2.27018079, 2.08505127,
2.55478864, 2.22415625, 2.58517923, 2.58802256, 2.94870959,
2.69301739, 2.19991535, 2.69473146, 2.64704637, 2.62753542,
2.14240825, 2.38565154, 1.94592117, 2.32243877, 2.69337246,
2.51283854, 2.62484451, 2.15559054, 2.35410875, 2.31219177,
1.96018265, 2.34711266, 2.58083322, 2.40290041, 2.20439791,
2.31472425, 2.16228248, 2.16439749, 2.20080737, 1.73293206,
1.9264407, 2.25089861, 2.69269101, 2.59296687, 2.1420998,
1.67819153, 1.98419023, 2.14479494, 1.89055376, 1.96720648,
1.9916694, 2.37227761, 2.14446036, 2.34573903, 1.86162546,
2.1410721, 2.39204939, 2.52529064, 2.47079939, 2.9299031,
3.09452923, 2.93276708, 3.21731309, 3.06248964, 2.90413406,
2.67844632, 2.45621213, 2.41463398, 2.7373913, 3.14917045,
3.4033949, 3.82283446, 4.02285451, 3.7619638, 4.10346795],
columns=['value'])
self.test_data7 = pd.DataFrame([4.75233583, 4.47668283, 4.55894263, 4.61765848, 4.622892,
4.58941116, 4.32535872, 3.88112797, 3.47237806, 3.50898953,
3.82530406, 3.6718017, 3.78918195, 4.1800752, 4.01818557,
4.40822582, 4.65474654, 4.89287256, 4.40879274, 4.65505126,
4.36876403, 4.58418934, 4.75687172, 4.3689799, 4.16126498,
4.0203982, 3.77148242, 3.38198096, 3.07261764, 2.9014741,
2.5049543, 2.756105, 2.28779058, 2.16986991, 1.8415962,
1.83319008, 2.20898291, 2.00128981, 1.75747025, 1.26676663,
1.40316876, 1.11126484, 1.60376367, 1.22523829, 1.58816681,
1.49705679, 1.80244138, 1.55128293, 1.35339409, 1.50985759,
1.0808451, 1.05892796, 1.43414812, 1.43039101, 1.73631655,
1.43940867, 1.82864425, 1.71088265, 2.12015154, 2.45417128,
2.84777618, 2.7925612, 2.90975121, 3.25920745, 3.13801182,
3.52733677, 3.65468491, 3.69395211, 3.49862035, 3.24786017,
3.64463138, 4.00331929, 3.62509565, 3.78013949, 3.4174012,
3.76312271, 3.62054004, 3.67206716, 3.60596058, 3.38636199,
3.42580676, 3.32921095, 3.02976759, 3.28258676, 3.45760838,
3.24917528, 2.94618304, 2.86980011, 2.63191259, 2.39566759,
2.53159917, 2.96273967, 3.25626185, 2.97425402, 3.16412191,
3.58280763, 3.23257727, 3.62353556, 3.12806399, 2.92532313],
columns=['value'])
# build a test series of 500 data points to test the evaluation process when there are more than 250 data points
self.long_data = pd.DataFrame([ 9.879, 9.916, 10.109, 10.214, 10.361, 10.768, 10.594, 10.288,
10.082, 9.994, 10.125, 10.126, 10.384, 10.734, 10.4 , 10.87 ,
11.338, 11.061, 11.415, 11.724, 12.077, 12.196, 12.064, 12.423,
12.19 , 11.729, 11.677, 11.448, 11.485, 10.989, 11.242, 11.239,
11.113, 11.075, 11.471, 11.745, 11.754, 11.782, 12.079, 11.97 ,
12.178, 11.95 , 12.438, 12.612, 12.804, 12.952, 12.612, 12.867,
12.832, 12.832, 13.015, 13.315, 13.249, 12.904, 12.776, 12.64 ,
12.543, 12.287, 12.225, 11.844, 11.985, 11.945, 11.542, 11.871,
12.245, 12.228, 12.362, 11.899, 11.962, 12.374, 12.816, 12.649,
12.252, 12.579, 12.3 , 11.988, 12.177, 12.312, 12.744, 12.599,
12.524, 12.82 , 12.67 , 12.876, 12.986, 13.271, 13.606, 13.82 ,
14.161, 13.833, 13.831, 14.137, 13.705, 13.414, 13.037, 12.759,
12.642, 12.948, 13.297, 13.483, 13.836, 14.179, 13.709, 13.655,
13.198, 13.508, 13.953, 14.387, 14.043, 13.987, 13.561, 13.391,
12.923, 12.555, 12.503, 12.292, 11.877, 12.34 , 12.141, 11.687,
11.992, 12.458, 12.131, 11.75 , 11.739, 11.263, 11.762, 11.976,
11.578, 11.854, 12.136, 12.422, 12.311, 12.56 , 12.879, 12.861,
12.973, 13.235, 13.53 , 13.531, 13.137, 13.166, 13.31 , 13.103,
13.007, 12.643, 12.69 , 12.216, 12.385, 12.046, 12.321, 11.9 ,
11.772, 11.816, 11.871, 11.59 , 11.518, 11.94 , 11.803, 11.924,
12.183, 12.136, 12.361, 12.406, 11.932, 11.684, 11.292, 11.388,
11.874, 12.184, 12.002, 12.16 , 11.741, 11.26 , 11.123, 11.534,
11.777, 11.407, 11.275, 11.679, 11.62 , 11.218, 11.235, 11.352,
11.366, 11.061, 10.661, 10.582, 10.899, 11.352, 11.792, 11.475,
11.263, 11.538, 11.183, 10.936, 11.399, 11.171, 11.214, 10.89 ,
10.728, 11.191, 11.646, 11.62 , 11.195, 11.178, 11.18 , 10.956,
11.205, 10.87 , 11.098, 10.639, 10.487, 10.507, 10.92 , 10.558,
10.119, 9.882, 9.573, 9.515, 9.845, 9.852, 9.495, 9.726,
10.116, 10.452, 10.77 , 11.225, 10.92 , 10.824, 11.096, 11.542,
11.06 , 10.568, 10.585, 10.884, 10.401, 10.068, 9.964, 10.285,
10.239, 10.036, 10.417, 10.132, 9.839, 9.556, 9.084, 9.239,
9.304, 9.067, 8.587, 8.471, 8.007, 8.321, 8.55 , 9.008,
9.138, 9.088, 9.434, 9.156, 9.65 , 9.431, 9.654, 10.079,
10.411, 10.865, 10.51 , 10.205, 10.519, 10.367, 10.855, 10.642,
10.298, 10.622, 10.173, 9.792, 9.995, 9.904, 9.771, 9.597,
9.506, 9.212, 9.688, 10.032, 9.723, 9.839, 9.918, 10.332,
10.236, 9.989, 10.192, 10.685, 10.908, 11.275, 11.72 , 12.158,
12.045, 12.244, 12.333, 12.246, 12.552, 12.958, 13.11 , 13.53 ,
13.123, 13.138, 13.57 , 13.389, 13.511, 13.759, 13.698, 13.744,
13.467, 13.795, 13.665, 13.377, 13.423, 13.772, 13.295, 13.073,
12.718, 12.388, 12.399, 12.185, 11.941, 11.818, 11.465, 11.811,
12.163, 11.86 , 11.935, 11.809, 12.145, 12.624, 12.768, 12.321,
12.277, 11.889, 12.11 , 12.606, 12.943, 12.945, 13.112, 13.199,
13.664, 14.051, 14.189, 14.339, 14.611, 14.656, 15.112, 15.086,
15.263, 15.021, 15.346, 15.572, 15.607, 15.983, 16.151, 16.215,
16.096, 16.089, 16.32 , 16.59 , 16.657, 16.752, 16.583, 16.743,
16.373, 16.662, 16.243, 16.163, 16.491, 16.958, 16.977, 17.225,
17.637, 17.344, 17.684, 17.892, 18.036, 18.182, 17.803, 17.588,
17.101, 17.538, 17.124, 16.787, 17.167, 17.138, 16.955, 17.148,
17.135, 17.635, 17.718, 17.675, 17.622, 17.358, 17.754, 17.729,
17.576, 17.772, 18.239, 18.441, 18.729, 18.319, 18.608, 18.493,
18.069, 18.122, 18.314, 18.423, 18.709, 18.548, 18.384, 18.391,
17.988, 17.986, 17.653, 17.249, 17.298, 17.06 , 17.36 , 17.108,
17.348, 17.596, 17.46 , 17.635, 17.275, 17.291, 16.933, 17.337,
17.231, 17.146, 17.148, 16.751, 16.891, 17.038, 16.735, 16.64 ,
16.231, 15.957, 15.977, 16.077, 16.054, 15.797, 15.67 , 15.911,
16.077, 16.17 , 15.722, 15.258, 14.877, 15.138, 15. , 14.811,
14.698, 14.407, 14.583, 14.704, 15.153, 15.436, 15.634, 15.453,
15.877, 15.696, 15.563, 15.927, 16.255, 16.696, 16.266, 16.698,
16.365, 16.493, 16.973, 16.71 , 16.327, 16.605, 16.486, 16.846,
16.935, 17.21 , 17.389, 17.546, 17.773, 17.641, 17.485, 17.794,
17.354, 16.904, 16.675, 16.43 , 16.898, 16.819, 16.921, 17.201,
17.617, 17.368, 17.864, 17.484],
columns=['value'])
self.long_bench = pd.DataFrame([ 9.7 , 10.179, 10.321, 9.855, 9.936, 10.096, 10.331, 10.662,
10.59 , 11.031, 11.154, 10.945, 10.625, 10.233, 10.284, 10.252,
10.221, 10.352, 10.444, 10.773, 10.904, 11.104, 10.797, 10.55 ,
10.943, 11.352, 11.641, 11.983, 11.696, 12.138, 12.365, 12.379,
11.969, 12.454, 12.947, 13.119, 13.013, 12.763, 12.632, 13.034,
12.681, 12.561, 12.938, 12.867, 13.202, 13.132, 13.539, 13.91 ,
13.456, 13.692, 13.771, 13.904, 14.069, 13.728, 13.97 , 14.228,
13.84 , 14.041, 13.963, 13.689, 13.543, 13.858, 14.118, 13.987,
13.611, 14.028, 14.229, 14.41 , 14.74 , 15.03 , 14.915, 15.207,
15.354, 15.665, 15.877, 15.682, 15.625, 15.175, 15.105, 14.893,
14.86 , 15.097, 15.178, 15.293, 15.238, 15. , 15.283, 14.994,
14.907, 14.664, 14.888, 15.297, 15.313, 15.368, 14.956, 14.802,
14.506, 14.257, 14.619, 15.019, 15.049, 14.625, 14.894, 14.978,
15.434, 15.578, 16.038, 16.107, 16.277, 16.365, 16.204, 16.465,
16.401, 16.895, 17.057, 16.621, 16.225, 16.075, 15.863, 16.292,
16.551, 16.724, 16.817, 16.81 , 17.192, 16.86 , 16.745, 16.707,
16.552, 16.133, 16.301, 16.08 , 15.81 , 15.75 , 15.909, 16.127,
16.457, 16.204, 16.329, 16.748, 16.624, 17.011, 16.548, 16.831,
16.653, 16.791, 16.57 , 16.778, 16.928, 16.932, 17.22 , 16.876,
17.301, 17.422, 17.689, 17.316, 17.547, 17.534, 17.409, 17.669,
17.416, 17.859, 17.477, 17.307, 17.245, 17.352, 17.851, 17.412,
17.144, 17.138, 17.085, 16.926, 16.674, 16.854, 17.064, 16.95 ,
16.609, 16.957, 16.498, 16.552, 16.175, 15.858, 15.697, 15.781,
15.583, 15.36 , 15.558, 16.046, 15.968, 15.905, 16.358, 16.783,
17.048, 16.762, 17.224, 17.363, 17.246, 16.79 , 16.608, 16.423,
15.991, 15.527, 15.147, 14.759, 14.792, 15.206, 15.148, 15.046,
15.429, 14.999, 15.407, 15.124, 14.72 , 14.713, 15.022, 15.092,
14.982, 15.001, 14.734, 14.713, 14.841, 14.562, 15.005, 15.483,
15.472, 15.277, 15.503, 15.116, 15.12 , 15.442, 15.476, 15.789,
15.36 , 15.764, 16.218, 16.493, 16.642, 17.088, 16.816, 16.645,
16.336, 16.511, 16.2 , 15.994, 15.86 , 15.929, 16.316, 16.416,
16.746, 17.173, 17.531, 17.627, 17.407, 17.49 , 17.768, 17.509,
17.795, 18.147, 18.63 , 18.945, 19.021, 19.518, 19.6 , 19.744,
19.63 , 19.32 , 18.933, 19.297, 19.598, 19.446, 19.236, 19.198,
19.144, 19.159, 19.065, 19.032, 18.586, 18.272, 18.119, 18.3 ,
17.894, 17.744, 17.5 , 17.083, 17.092, 16.864, 16.453, 16.31 ,
16.681, 16.342, 16.447, 16.715, 17.068, 17.067, 16.822, 16.673,
16.675, 16.592, 16.686, 16.397, 15.902, 15.597, 15.357, 15.162,
15.348, 15.603, 15.283, 15.257, 15.082, 14.621, 14.366, 14.039,
13.957, 14.141, 13.854, 14.243, 14.414, 14.033, 13.93 , 14.104,
14.461, 14.249, 14.053, 14.165, 14.035, 14.408, 14.501, 14.019,
14.265, 14.67 , 14.797, 14.42 , 14.681, 15.16 , 14.715, 14.292,
14.411, 14.656, 15.094, 15.366, 15.055, 15.198, 14.762, 14.294,
13.854, 13.811, 13.549, 13.927, 13.897, 13.421, 13.037, 13.32 ,
13.721, 13.511, 13.999, 13.529, 13.418, 13.881, 14.326, 14.362,
13.987, 14.015, 13.599, 13.343, 13.307, 13.689, 13.851, 13.404,
13.577, 13.395, 13.619, 13.195, 12.904, 12.553, 12.294, 12.649,
12.425, 11.967, 12.062, 11.71 , 11.645, 12.058, 12.136, 11.749,
11.953, 12.401, 12.044, 11.901, 11.631, 11.396, 11.036, 11.244,
10.864, 11.207, 11.135, 11.39 , 11.723, 12.084, 11.8 , 11.471,
11.33 , 11.504, 11.295, 11.3 , 10.901, 10.494, 10.825, 11.054,
10.866, 10.713, 10.875, 10.846, 10.947, 11.422, 11.158, 10.94 ,
10.521, 10.36 , 10.411, 10.792, 10.472, 10.305, 10.525, 10.853,
10.556, 10.72 , 10.54 , 10.583, 10.299, 10.061, 10.004, 9.903,
9.796, 9.472, 9.246, 9.54 , 9.456, 9.177, 9.484, 9.557,
9.493, 9.968, 9.536, 9.39 , 8.922, 8.423, 8.518, 8.686,
8.771, 9.098, 9.281, 8.858, 9.027, 8.553, 8.784, 8.996,
9.379, 9.846, 9.855, 9.502, 9.608, 9.761, 9.409, 9.4 ,
9.332, 9.34 , 9.284, 8.844, 8.722, 8.376, 8.775, 8.293,
8.144, 8.63 , 8.831, 8.957, 9.18 , 9.601, 9.695, 10.018,
9.841, 9.743, 9.292, 8.85 , 9.316, 9.288, 9.519, 9.738,
9.289, 9.785, 9.804, 10.06 , 10.188, 10.095, 9.739, 9.881,
9.7 , 9.991, 10.391, 10.002],
columns=['value'])
def test_performance_stats(self):
"""test the function performance_statistics()
"""
pass
def test_fv(self):
print(f'test with test data and empty DataFrame')
self.assertAlmostEqual(eval_fv(self.test_data1), 6.39245474)
self.assertAlmostEqual(eval_fv(self.test_data2), 10.05126375)
self.assertAlmostEqual(eval_fv(self.test_data3), 6.95068113)
self.assertAlmostEqual(eval_fv(self.test_data4), 8.86508591)
self.assertAlmostEqual(eval_fv(self.test_data5), 4.58627048)
self.assertAlmostEqual(eval_fv(self.test_data6), 4.10346795)
self.assertAlmostEqual(eval_fv(self.test_data7), 2.92532313)
self.assertAlmostEqual(eval_fv(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
def test_max_drawdown(self):
print(f'test with test data and empty DataFrame')
self.assertAlmostEqual(eval_max_drawdown(self.test_data1)[0], 0.264274308)
self.assertEqual(eval_max_drawdown(self.test_data1)[1], 53)
self.assertEqual(eval_max_drawdown(self.test_data1)[2], 86)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data1)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data2)[0], 0.334690849)
self.assertEqual(eval_max_drawdown(self.test_data2)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data2)[2], 10)
self.assertEqual(eval_max_drawdown(self.test_data2)[3], 19)
self.assertAlmostEqual(eval_max_drawdown(self.test_data3)[0], 0.244452899)
self.assertEqual(eval_max_drawdown(self.test_data3)[1], 90)
self.assertEqual(eval_max_drawdown(self.test_data3)[2], 99)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data3)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data4)[0], 0.201849684)
self.assertEqual(eval_max_drawdown(self.test_data4)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4)[2], 50)
self.assertEqual(eval_max_drawdown(self.test_data4)[3], 54)
self.assertAlmostEqual(eval_max_drawdown(self.test_data5)[0], 0.534206456)
self.assertEqual(eval_max_drawdown(self.test_data5)[1], 21)
self.assertEqual(eval_max_drawdown(self.test_data5)[2], 60)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data5)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data6)[0], 0.670062689)
self.assertEqual(eval_max_drawdown(self.test_data6)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data6)[2], 70)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data6)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data7)[0], 0.783577449)
self.assertEqual(eval_max_drawdown(self.test_data7)[1], 17)
self.assertEqual(eval_max_drawdown(self.test_data7)[2], 51)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data7)[3]))
self.assertEqual(eval_max_drawdown(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
# test the case where the shifted series (test_data4 - 5) crosses zero, so the
# drawdown ratio exceeds 1 and a divide-by-zero becomes possible:
# TODO: investigate how the divide-by-zero case should be handled
self.assertAlmostEqual(eval_max_drawdown(self.test_data4 - 5)[0], 1.0770474121951792)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[2], 50)
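# The expected tuples above follow the common max-drawdown convention:
# (drawdown ratio, peak position, trough position, recovery position or NaN).
# A minimal reference sketch of that convention, using the module-level numpy
# import; illustrative only and not necessarily how eval_max_drawdown is
# implemented in the library (recovery position omitted for brevity):
def _max_drawdown_sketch(df):
    values = df['value'].values
    peaks = np.maximum.accumulate(values)         # running maximum so far
    drawdowns = (peaks - values) / peaks          # drawdown ratio at each step
    trough = int(np.argmax(drawdowns))            # position of the deepest drawdown
    peak = int(np.argmax(values[:trough + 1]))    # position of the preceding peak
    return drawdowns[trough], peak, trough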
def test_info_ratio(self):
reference = self.test_data1
self.assertAlmostEqual(eval_info_ratio(self.test_data2, reference, 'value'), 0.075553316)
self.assertAlmostEqual(eval_info_ratio(self.test_data3, reference, 'value'), 0.018949457)
self.assertAlmostEqual(eval_info_ratio(self.test_data4, reference, 'value'), 0.056328143)
self.assertAlmostEqual(eval_info_ratio(self.test_data5, reference, 'value'), -0.004270068)
self.assertAlmostEqual(eval_info_ratio(self.test_data6, reference, 'value'), 0.009198027)
self.assertAlmostEqual(eval_info_ratio(self.test_data7, reference, 'value'), -0.000890283)
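# The information ratio compares the strategy with the reference series:
# mean active (excess) return divided by its standard deviation (the tracking
# error). A bare-bones sketch of that textbook formula, illustrative only and
# not the library implementation:
def _info_ratio_sketch(df, reference, col='value'):
    ret = df[col].pct_change().dropna()              # per-period strategy returns
    ref_ret = reference[col].pct_change().dropna()   # per-period reference returns
    active = ret - ref_ret                           # active return per period
    return active.mean() / active.std()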
def test_volatility(self):
self.assertAlmostEqual(eval_volatility(self.test_data1), 0.748646166)
self.assertAlmostEqual(eval_volatility(self.test_data2), 0.75527442)
self.assertAlmostEqual(eval_volatility(self.test_data3), 0.654188853)
self.assertAlmostEqual(eval_volatility(self.test_data4), 0.688375814)
self.assertAlmostEqual(eval_volatility(self.test_data5), 1.089989522)
self.assertAlmostEqual(eval_volatility(self.test_data6), 1.775419308)
self.assertAlmostEqual(eval_volatility(self.test_data7), 1.962758406)
self.assertAlmostEqual(eval_volatility(self.test_data1, logarithm=False), 0.750993311)
self.assertAlmostEqual(eval_volatility(self.test_data2, logarithm=False), 0.75571473)
self.assertAlmostEqual(eval_volatility(self.test_data3, logarithm=False), 0.655331424)
self.assertAlmostEqual(eval_volatility(self.test_data4, logarithm=False), 0.692683021)
self.assertAlmostEqual(eval_volatility(self.test_data5, logarithm=False), 1.09602969)
self.assertAlmostEqual(eval_volatility(self.test_data6, logarithm=False), 1.774789504)
self.assertAlmostEqual(eval_volatility(self.test_data7, logarithm=False), 2.003329156)
self.assertEqual(eval_volatility(pd.DataFrame()), -np.inf)
self.assertRaises(AssertionError, eval_volatility, [1, 2, 3])
# test volatility calculation on the long data series
expected_volatility = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
0.39955371, 0.39974258, 0.40309866, 0.40486593, 0.4055514 ,
0.40710639, 0.40708157, 0.40609006, 0.4073625 , 0.40835305,
0.41155304, 0.41218193, 0.41207489, 0.41300276, 0.41308415,
0.41292392, 0.41207645, 0.41238397, 0.41229291, 0.41164056,
0.41316317, 0.41348842, 0.41462249, 0.41474574, 0.41652625,
0.41649176, 0.41701556, 0.4166593 , 0.41684221, 0.41491689,
0.41435209, 0.41549087, 0.41849338, 0.41998049, 0.41959106,
0.41907311, 0.41916103, 0.42120773, 0.42052391, 0.42111225,
0.42124589, 0.42356445, 0.42214672, 0.42324022, 0.42476639,
0.42621689, 0.42549439, 0.42533678, 0.42539414, 0.42545038,
0.42593637, 0.42652095, 0.42665489, 0.42699563, 0.42798159,
0.42784512, 0.42898006, 0.42868781, 0.42874188, 0.42789631,
0.4277768 , 0.42776827, 0.42685216, 0.42660989, 0.42563155,
0.42618281, 0.42606281, 0.42505222, 0.42653242, 0.42555378,
0.42500842, 0.42561939, 0.42442059, 0.42395414, 0.42384356,
0.42319135, 0.42397497, 0.42488579, 0.42449729, 0.42508766,
0.42509878, 0.42456616, 0.42535577, 0.42681884, 0.42688552,
0.42779918, 0.42706058, 0.42792887, 0.42762114, 0.42894045,
0.42977398, 0.42919859, 0.42829041, 0.42780946, 0.42825318,
0.42858952, 0.42858315, 0.42805601, 0.42764751, 0.42744107,
0.42775518, 0.42707283, 0.4258592 , 0.42615335, 0.42526286,
0.4248906 , 0.42368986, 0.4232565 , 0.42265079, 0.42263954,
0.42153046, 0.42132051, 0.41995353, 0.41916605, 0.41914271,
0.41876945, 0.41740175, 0.41583884, 0.41614026, 0.41457908,
0.41472411, 0.41310876, 0.41261041, 0.41212369, 0.41211677,
0.4100645 , 0.40852504, 0.40860297, 0.40745338, 0.40698661,
0.40644546, 0.40591375, 0.40640744, 0.40620663, 0.40656649,
0.40727154, 0.40797605, 0.40807137, 0.40808913, 0.40809676,
0.40711767, 0.40724628, 0.40713077, 0.40772698, 0.40765157,
0.40658297, 0.4065991 , 0.405011 , 0.40537645, 0.40432626,
0.40390177, 0.40237701, 0.40291623, 0.40301797, 0.40324145,
0.40312864, 0.40328316, 0.40190955, 0.40246506, 0.40237663,
0.40198407, 0.401969 , 0.40185623, 0.40198313, 0.40005643,
0.39940743, 0.39850438, 0.39845398, 0.39695093, 0.39697295,
0.39663201, 0.39675444, 0.39538699, 0.39331959, 0.39326074,
0.39193287, 0.39157266, 0.39021327, 0.39062591, 0.38917591,
0.38976991, 0.38864187, 0.38872158, 0.38868096, 0.38868377,
0.38842057, 0.38654784, 0.38649517, 0.38600464, 0.38408115,
0.38323049, 0.38260215, 0.38207663, 0.38142669, 0.38003262,
0.37969367, 0.37768092, 0.37732108, 0.37741991, 0.37617779,
0.37698504, 0.37606784, 0.37499276, 0.37533731, 0.37350437,
0.37375172, 0.37385382, 0.37384003, 0.37338938, 0.37212288,
0.37273075, 0.370559 , 0.37038506, 0.37062153, 0.36964661,
0.36818564, 0.3656634 , 0.36539259, 0.36428672, 0.36502487,
0.3647148 , 0.36551435, 0.36409919, 0.36348181, 0.36254383,
0.36166601, 0.36142665, 0.35954942, 0.35846915, 0.35886759,
0.35813867, 0.35642888, 0.35375231, 0.35061783, 0.35078463,
0.34995508, 0.34688918, 0.34548257, 0.34633158, 0.34622833,
0.34652111, 0.34622774, 0.34540951, 0.34418809, 0.34276593,
0.34160916, 0.33811193, 0.33822709, 0.3391685 , 0.33883381])
test_volatility = eval_volatility(self.long_data)
test_volatility_roll = self.long_data['volatility'].values
self.assertAlmostEqual(test_volatility, np.nanmean(expected_volatility))
self.assertTrue(np.allclose(expected_volatility, test_volatility_roll, equal_nan=True))
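# Volatility here behaves like the annualized standard deviation of daily
# returns (log returns by default, simple returns when logarithm=False); the
# long-data variant is computed on a rolling window of roughly one trading
# year, hence the leading NaNs in expected_volatility above. A hedged sketch
# of the point estimate, illustrative only:
def _volatility_sketch(df, logarithm=True):
    if logarithm:
        ret = np.log(df['value'] / df['value'].shift(1)).dropna()
    else:
        ret = df['value'].pct_change().dropna()
    return ret.std() * np.sqrt(250)    # annualized, assuming ~250 trading days per year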
def test_sharp(self):
self.assertAlmostEqual(eval_sharp(self.test_data1, 5, 0), 0.06135557)
self.assertAlmostEqual(eval_sharp(self.test_data2, 5, 0), 0.167858667)
self.assertAlmostEqual(eval_sharp(self.test_data3, 5, 0), 0.09950547)
self.assertAlmostEqual(eval_sharp(self.test_data4, 5, 0), 0.154928241)
self.assertAlmostEqual(eval_sharp(self.test_data5, 5, 0.002), 0.007868673)
self.assertAlmostEqual(eval_sharp(self.test_data6, 5, 0.002), 0.018306537)
self.assertAlmostEqual(eval_sharp(self.test_data7, 5, 0.002), 0.006259971)
# test sharpe ratio calculation on the long data series
expected_sharp = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.02346815, -0.02618783, -0.03763912, -0.03296276, -0.03085698,
-0.02851101, -0.02375842, -0.02016746, -0.01107885, -0.01426613,
-0.00787204, -0.01135784, -0.01164232, -0.01003481, -0.00022512,
-0.00046792, -0.01209378, -0.01278892, -0.01298135, -0.01938214,
-0.01671044, -0.02120509, -0.0244281 , -0.02416067, -0.02763238,
-0.027579 , -0.02372774, -0.02215294, -0.02467094, -0.02091266,
-0.02590194, -0.03049876, -0.02077131, -0.01483653, -0.02488144,
-0.02671638, -0.02561547, -0.01957986, -0.02479803, -0.02703162,
-0.02658087, -0.01641755, -0.01946472, -0.01647757, -0.01280889,
-0.00893643, -0.00643275, -0.00698457, -0.00549962, -0.00654677,
-0.00494757, -0.0035633 , -0.00109037, 0.00750654, 0.00451208,
0.00625502, 0.01221367, 0.01326454, 0.01535037, 0.02269538,
0.02028715, 0.02127712, 0.02333264, 0.02273159, 0.01670643,
0.01376513, 0.01265342, 0.02211647, 0.01612449, 0.00856706,
-0.00077147, -0.00268848, 0.00210993, -0.00443934, -0.00411912,
-0.0018756 , -0.00867461, -0.00581601, -0.00660835, -0.00861137,
-0.00678614, -0.01188408, -0.00589617, -0.00244323, -0.00201891,
-0.01042846, -0.01471016, -0.02167034, -0.02258554, -0.01306809,
-0.00909086, -0.01233746, -0.00595166, -0.00184208, 0.00750497,
0.01481886, 0.01761972, 0.01562886, 0.01446414, 0.01285826,
0.01357719, 0.00967613, 0.01636272, 0.01458437, 0.02280183,
0.02151903, 0.01700276, 0.01597368, 0.02114336, 0.02233297,
0.02585631, 0.02768459, 0.03519235, 0.04204535, 0.04328161,
0.04672855, 0.05046191, 0.04619848, 0.04525853, 0.05381529,
0.04598861, 0.03947394, 0.04665006, 0.05586077, 0.05617728,
0.06495018, 0.06205172, 0.05665466, 0.06500615, 0.0632062 ,
0.06084328, 0.05851466, 0.05659229, 0.05159347, 0.0432977 ,
0.0474047 , 0.04231723, 0.03613176, 0.03618391, 0.03591012,
0.03885674, 0.0402686 , 0.03846423, 0.04534014, 0.04721458,
0.05130912, 0.05026281, 0.05394312, 0.05529349, 0.05949243,
0.05463304, 0.06195165, 0.06767606, 0.06880985, 0.07048996,
0.07078815, 0.07420767, 0.06773439, 0.0658441 , 0.06470875,
0.06302349, 0.06456876, 0.06411282, 0.06216669, 0.067094 ,
0.07055075, 0.07254976, 0.07119253, 0.06173308, 0.05393352,
0.05681246, 0.05250643, 0.06099845, 0.0655544 , 0.06977334,
0.06636514, 0.06177949, 0.06869908, 0.06719767, 0.06178738,
0.05915714, 0.06882277, 0.06756821, 0.06507994, 0.06489791,
0.06553941, 0.073123 , 0.07576757, 0.06805446, 0.06063571,
0.05033801, 0.05206971, 0.05540306, 0.05249118, 0.05755587,
0.0586174 , 0.05051288, 0.0564852 , 0.05757284, 0.06358355,
0.06130082, 0.04925482, 0.03834472, 0.04163981, 0.04648316,
0.04457858, 0.04324626, 0.04328791, 0.04156207, 0.04818652,
0.04972634, 0.06024123, 0.06489556, 0.06255485, 0.06069815,
0.06466389, 0.07081163, 0.07895358, 0.0881782 , 0.09374151,
0.08336506, 0.08764795, 0.09080174, 0.08808926, 0.08641158,
0.07811943, 0.06885318, 0.06479503, 0.06851185, 0.07382819,
0.07047903, 0.06658251, 0.07638379, 0.08667974, 0.08867918,
0.08245323, 0.08961866, 0.09905298, 0.0961908 , 0.08562706,
0.0839014 , 0.0849072 , 0.08338395, 0.08783487, 0.09463609,
0.10332336, 0.11806497, 0.11220297, 0.11589097, 0.11678405])
test_sharp = eval_sharp(self.long_data, 5, 0.00035)
self.assertAlmostEqual(np.nanmean(expected_sharp), test_sharp)
self.assertTrue(np.allclose(self.long_data['sharp'].values, expected_sharp, equal_nan=True))
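# The Sharpe ratio is the excess return over the risk-free rate divided by the
# return volatility. A generic sketch of the textbook formula; the role of the
# second positional argument of eval_sharp is not inferred here, so this is an
# illustration only:
def _sharpe_sketch(df, riskfree=0.0):
    ret = df['value'].pct_change().dropna()
    return (ret.mean() - riskfree) / ret.std()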
def test_beta(self):
reference = self.test_data1
self.assertAlmostEqual(eval_beta(self.test_data2, reference, 'value'), -0.017148939)
self.assertAlmostEqual(eval_beta(self.test_data3, reference, 'value'), -0.042204233)
self.assertAlmostEqual(eval_beta(self.test_data4, reference, 'value'), -0.15652986)
self.assertAlmostEqual(eval_beta(self.test_data5, reference, 'value'), -0.049195532)
self.assertAlmostEqual(eval_beta(self.test_data6, reference, 'value'), -0.026995082)
self.assertAlmostEqual(eval_beta(self.test_data7, reference, 'value'), -0.01147809)
self.assertRaises(TypeError, eval_beta, [1, 2, 3], reference, 'value')
self.assertRaises(TypeError, eval_beta, self.test_data3, [1, 2, 3], 'value')
self.assertRaises(KeyError, eval_beta, self.test_data3, reference, 'not_found_value')
# test beta calculation on the long data series
expected_beta = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.04988841, -0.05127618, -0.04692104, -0.04272652, -0.04080598,
-0.0493347 , -0.0460858 , -0.0416761 , -0.03691527, -0.03724924,
-0.03678865, -0.03987324, -0.03488321, -0.02567672, -0.02690303,
-0.03010128, -0.02437967, -0.02571932, -0.02455681, -0.02839811,
-0.03358653, -0.03396697, -0.03466321, -0.03050966, -0.0247583 ,
-0.01629325, -0.01880895, -0.01480403, -0.01348783, -0.00544294,
-0.00648176, -0.00467036, -0.01135331, -0.0156841 , -0.02340763,
-0.02615705, -0.02730771, -0.02906174, -0.02860664, -0.02412914,
-0.02066416, -0.01744816, -0.02185133, -0.02145285, -0.02681765,
-0.02827694, -0.02394581, -0.02744096, -0.02778825, -0.02703065,
-0.03160023, -0.03615371, -0.03681072, -0.04265126, -0.04344738,
-0.04232421, -0.04705272, -0.04533344, -0.04605934, -0.05272737,
-0.05156463, -0.05134196, -0.04730733, -0.04425352, -0.03869831,
-0.04159571, -0.04223998, -0.04346747, -0.04229844, -0.04740093,
-0.04992507, -0.04621232, -0.04477644, -0.0486915 , -0.04598224,
-0.04943463, -0.05006391, -0.05362256, -0.04994067, -0.05464769,
-0.05443275, -0.05513493, -0.05173594, -0.04500994, -0.04662891,
-0.03903505, -0.0419592 , -0.04307773, -0.03925718, -0.03711574,
-0.03992631, -0.0433058 , -0.04533641, -0.0461183 , -0.05600344,
-0.05758377, -0.05959874, -0.05605942, -0.06002859, -0.06253002,
-0.06747014, -0.06427915, -0.05931947, -0.05769974, -0.04791515,
-0.05175088, -0.05748039, -0.05385232, -0.05072975, -0.05052637,
-0.05125567, -0.05005785, -0.05325104, -0.04977727, -0.04947867,
-0.05148544, -0.05739156, -0.05742069, -0.06047279, -0.0558414 ,
-0.06086126, -0.06265151, -0.06411129, -0.06828052, -0.06781762,
-0.07083409, -0.07211207, -0.06799162, -0.06913295, -0.06775162,
-0.0696265 , -0.06678248, -0.06867502, -0.06581961, -0.07055823,
-0.06448184, -0.06097973, -0.05795587, -0.0618383 , -0.06130145,
-0.06050652, -0.05936661, -0.05749424, -0.0499 , -0.05050495,
-0.04962687, -0.05033439, -0.05070116, -0.05422009, -0.05369759,
-0.05548943, -0.05907353, -0.05933035, -0.05927918, -0.06227663,
-0.06011455, -0.05650432, -0.05828134, -0.05620949, -0.05715323,
-0.05482478, -0.05387113, -0.05095559, -0.05377999, -0.05334267,
-0.05220438, -0.04001521, -0.03892434, -0.03660782, -0.04282708,
-0.04324623, -0.04127048, -0.04227559, -0.04275226, -0.04347049,
-0.04125853, -0.03806295, -0.0330632 , -0.03155531, -0.03277152,
-0.03304518, -0.03878731, -0.03830672, -0.03727434, -0.0370571 ,
-0.04509224, -0.04207632, -0.04116198, -0.04545179, -0.04584584,
-0.05287341, -0.05417433, -0.05175836, -0.05005509, -0.04268674,
-0.03442321, -0.03457309, -0.03613426, -0.03524391, -0.03629479,
-0.04361312, -0.02626705, -0.02406115, -0.03046384, -0.03181044,
-0.03375164, -0.03661673, -0.04520779, -0.04926951, -0.05726738,
-0.0584486 , -0.06220608, -0.06800563, -0.06797431, -0.07562211,
-0.07481996, -0.07731229, -0.08413381, -0.09031826, -0.09691925,
-0.11018071, -0.11952675, -0.10826026, -0.11173895, -0.10756359,
-0.10775916, -0.11664559, -0.10505051, -0.10606547, -0.09855355,
-0.10004159, -0.10857084, -0.12209301, -0.11605758, -0.11105113,
-0.1155195 , -0.11569505, -0.10513348, -0.09611072, -0.10719791,
-0.10843965, -0.11025856, -0.10247839, -0.10554044, -0.10927647,
-0.10645088, -0.09982498, -0.10542734, -0.09631372, -0.08229695])
test_beta_mean = eval_beta(self.long_data, self.long_bench, 'value')
test_beta_roll = self.long_data['beta'].values
self.assertAlmostEqual(test_beta_mean, np.nanmean(expected_beta))
self.assertTrue(np.allclose(test_beta_roll, expected_beta, equal_nan=True))
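# Beta measures the sensitivity of the strategy's returns to the reference
# series: covariance of the two return series divided by the variance of the
# reference returns. A minimal sketch, illustrative only:
def _beta_sketch(df, reference, col='value'):
    ret = df[col].pct_change().dropna()
    ref_ret = reference[col].pct_change().dropna()
    ret, ref_ret = ret.align(ref_ret, join='inner')  # keep overlapping periods only
    return np.cov(ret, ref_ret)[0, 1] / np.var(ref_ret, ddof=1)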
def test_alpha(self):
reference = self.test_data1
self.assertAlmostEqual(eval_alpha(self.test_data2, 5, reference, 'value', 0.5), 11.63072977)
self.assertAlmostEqual(eval_alpha(self.test_data3, 5, reference, 'value', 0.5), 1.886590071)
self.assertAlmostEqual(eval_alpha(self.test_data4, 5, reference, 'value', 0.5), 6.827021872)
self.assertAlmostEqual(eval_alpha(self.test_data5, 5, reference, 'value', 0.92), -1.192265168)
self.assertAlmostEqual(eval_alpha(self.test_data6, 5, reference, 'value', 0.92), -1.437142359)
self.assertAlmostEqual(eval_alpha(self.test_data7, 5, reference, 'value', 0.92), -1.781311545)
# test alpha calculation on the long data series
expected_alpha = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.09418119, -0.11188463, -0.17938358, -0.15588172, -0.1462678 ,
-0.13089586, -0.10780125, -0.09102891, -0.03987585, -0.06075686,
-0.02459503, -0.04104284, -0.0444565 , -0.04074585, 0.02191275,
0.02255955, -0.05583375, -0.05875539, -0.06055551, -0.09648245,
-0.07913737, -0.10627829, -0.12320965, -0.12368335, -0.1506743 ,
-0.15768033, -0.13638829, -0.13065298, -0.14537834, -0.127428 ,
-0.15504529, -0.18184636, -0.12652146, -0.09190138, -0.14847221,
-0.15840648, -0.1525789 , -0.11859418, -0.14700954, -0.16295761,
-0.16051645, -0.10364859, -0.11961134, -0.10258267, -0.08090148,
-0.05727746, -0.0429945 , -0.04672356, -0.03581408, -0.0439215 ,
-0.03429495, -0.0260362 , -0.01075022, 0.04931808, 0.02779388,
0.03984083, 0.08311951, 0.08995566, 0.10522428, 0.16159058,
0.14238174, 0.14759783, 0.16257712, 0.158908 , 0.11302115,
0.0909566 , 0.08272888, 0.15261884, 0.10546376, 0.04990313,
-0.01284111, -0.02720704, 0.00454725, -0.03965491, -0.03818265,
-0.02186992, -0.06574751, -0.04846454, -0.05204211, -0.06316498,
-0.05095099, -0.08502656, -0.04681162, -0.02362027, -0.02205091,
-0.07706374, -0.10371841, -0.14434688, -0.14797935, -0.09055402,
-0.06739549, -0.08824959, -0.04855888, -0.02291244, 0.04027138,
0.09370505, 0.11472939, 0.10243593, 0.0921445 , 0.07662648,
0.07946651, 0.05450718, 0.10497677, 0.09068334, 0.15462924,
0.14231034, 0.10544952, 0.09980256, 0.14035223, 0.14942974,
0.17624102, 0.19035477, 0.2500807 , 0.30724652, 0.31768915,
0.35007521, 0.38412975, 0.34356521, 0.33614463, 0.41206165,
0.33999177, 0.28045963, 0.34076789, 0.42220356, 0.42314636,
0.50790423, 0.47713348, 0.42520169, 0.50488411, 0.48705211,
0.46252601, 0.44325578, 0.42640573, 0.37986783, 0.30652822,
0.34503393, 0.2999069 , 0.24928617, 0.24730218, 0.24326897,
0.26657905, 0.27861168, 0.26392824, 0.32552649, 0.34177792,
0.37837011, 0.37025267, 0.4030612 , 0.41339361, 0.45076809,
0.40383354, 0.47093422, 0.52505036, 0.53614256, 0.5500943 ,
0.55319293, 0.59021451, 0.52358459, 0.50605947, 0.49359168,
0.47895956, 0.49320243, 0.4908336 , 0.47310767, 0.51821564,
0.55105932, 0.57291504, 0.5599809 , 0.46868842, 0.39620087,
0.42086934, 0.38317217, 0.45934108, 0.50048866, 0.53941991,
0.50676751, 0.46500915, 0.52993663, 0.51668366, 0.46405428,
0.44100603, 0.52726147, 0.51565458, 0.49186248, 0.49001081,
0.49367648, 0.56422294, 0.58882785, 0.51334664, 0.44386256,
0.35056709, 0.36490029, 0.39205071, 0.3677061 , 0.41134736,
0.42315067, 0.35356394, 0.40324562, 0.41340007, 0.46503322,
0.44355762, 0.34854314, 0.26412842, 0.28633753, 0.32335224,
0.30761141, 0.29709569, 0.29570487, 0.28000063, 0.32802547,
0.33967726, 0.42511212, 0.46252357, 0.44244974, 0.42152907,
0.45436727, 0.50482359, 0.57339198, 0.6573356 , 0.70912003,
0.60328917, 0.6395092 , 0.67015805, 0.64241557, 0.62779142,
0.55028063, 0.46448736, 0.43709245, 0.46777983, 0.51789439,
0.48594916, 0.4456216 , 0.52008189, 0.60548684, 0.62792473,
0.56645031, 0.62766439, 0.71829315, 0.69481356, 0.59550329,
0.58133754, 0.59014148, 0.58026655, 0.61719273, 0.67373203,
0.75573056, 0.89501633, 0.8347253 , 0.87964685, 0.89015835])
test_alpha_mean = eval_alpha(self.long_data, 100, self.long_bench, 'value')
test_alpha_roll = self.long_data['alpha'].values
self.assertAlmostEqual(test_alpha_mean, np.nanmean(expected_alpha))
self.assertTrue(np.allclose(test_alpha_roll, expected_alpha, equal_nan=True))
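# Alpha is the Jensen/CAPM-style excess return: the strategy's annualized
# return minus what its beta exposure to the reference would have delivered.
# The exact role of eval_alpha's numeric arguments is not inferred here; the
# underlying relation is simply:
#     alpha = annual_return - (riskfree + beta * (reference_annual_return - riskfree))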
def test_calmar(self):
"""test evaluate function eval_calmar()"""
pass
def test_benchmark(self):
reference = self.test_data1
tr, yr = eval_benchmark(self.test_data2, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data3, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data4, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data5, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data6, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data7, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
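# All six calls above return the same pair because eval_benchmark evaluates
# the reference series itself, regardless of which strategy data is passed in.
# The pair is presumably (total return, annualized return) of the benchmark,
# along the lines of (illustrative only; the day-count convention is an
# assumption):
#     total_return = ref['value'].iloc[-1] / ref['value'].iloc[0] - 1
#     annual_return = (1 + total_return) ** (days_per_year / n_periods) - 1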
def test_evaluate(self):
pass
class TestLoop(unittest.TestCase):
"""通过一个假设但精心设计的例子来测试loop_step以及loop方法的正确性"""
def setUp(self):
self.shares = ['share1', 'share2', 'share3', 'share4', 'share5', 'share6', 'share7']
self.dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08',
'2016/09/09', '2016/09/12', '2016/09/13', '2016/09/14', '2016/09/15',
'2016/09/16', '2016/09/19', '2016/09/20', '2016/09/21', '2016/09/22',
'2016/09/23', '2016/09/26', '2016/09/27', '2016/09/28', '2016/09/29',
'2016/09/30', '2016/10/10', '2016/10/11', '2016/10/12', '2016/10/13',
'2016/10/14', '2016/10/17', '2016/10/18', '2016/10/19', '2016/10/20',
'2016/10/21', '2016/10/23', '2016/10/24', '2016/10/25', '2016/10/26',
'2016/10/27', '2016/10/29', '2016/10/30', '2016/10/31', '2016/11/01',
'2016/11/02', '2016/11/05', '2016/11/06', '2016/11/07', '2016/11/08',
'2016/11/09', '2016/11/12', '2016/11/13', '2016/11/14', '2016/11/15',
'2016/11/16', '2016/11/19', '2016/11/20', '2016/11/21', '2016/11/22']
self.dates = [pd.Timestamp(date_text) for date_text in self.dates]
self.prices = np.array([[5.35, 5.09, 5.03, 4.98, 4.50, 5.09, 4.75],
[5.66, 4.84, 5.21, 5.44, 4.35, 5.06, 4.48],
[5.79, 4.60, 5.02, 5.45, 4.07, 4.76, 4.56],
[5.56, 4.63, 5.50, 5.74, 3.88, 4.62, 4.62],
[5.88, 4.64, 5.07, 5.46, 3.74, 4.63, 4.62],
[6.25, 4.51, 5.11, 5.45, 3.98, 4.25, 4.59],
[5.93, 4.96, 5.15, 5.24, 4.08, 4.13, 4.33],
[6.39, 4.65, 5.02, 5.47, 4.00, 3.91, 3.88],
[6.31, 4.26, 5.10, 5.58, 4.28, 3.77, 3.47],
[5.86, 3.77, 5.24, 5.36, 4.01, 3.43, 3.51],
[5.61, 3.39, 4.93, 5.38, 4.14, 3.68, 3.83],
[5.31, 3.76, 4.96, 5.30, 4.49, 3.63, 3.67],
[5.40, 4.06, 5.40, 5.77, 4.49, 3.94, 3.79],
[5.03, 3.87, 5.74, 5.75, 4.46, 4.40, 4.18],
[5.38, 3.91, 5.53, 6.15, 4.13, 4.03, 4.02],
[5.79, 4.13, 5.79, 6.04, 3.79, 3.93, 4.41],
[6.27, 4.27, 5.68, 6.01, 4.23, 3.50, 4.65],
[6.59, 4.57, 5.90, 5.71, 4.57, 3.39, 4.89],
[6.91, 5.04, 5.75, 5.23, 4.92, 3.30, 4.41],
[6.71, 5.31, 6.11, 5.47, 5.28, 3.25, 4.66],
[6.33, 5.40, 5.77, 5.79, 5.67, 2.94, 4.37],
[6.07, 5.21, 5.85, 5.82, 6.00, 2.71, 4.58],
[5.98, 5.06, 5.61, 5.61, 5.89, 2.55, 4.76],
[6.46, 4.69, 5.75, 5.31, 5.55, 2.21, 4.37],
[6.95, 5.12, 5.50, 5.24, 5.39, 2.29, 4.16],
[6.77, 5.27, 5.14, 5.41, 5.26, 2.21, 4.02],
[6.70, 5.72, 5.31, 5.60, 5.31, 2.04, 3.77],
[6.28, 6.10, 5.68, 5.28, 5.22, 2.39, 3.38],
[6.61, 6.27, 5.73, 4.99, 4.90, 2.30, 3.07],
[6.25, 6.49, 6.04, 5.09, 4.57, 2.41, 2.90],
[6.47, 6.16, 6.27, 5.39, 4.96, 2.40, 2.50],
[6.45, 6.26, 6.60, 5.58, 4.82, 2.79, 2.76],
[6.88, 6.39, 6.10, 5.33, 4.39, 2.67, 2.29],
[7.00, 6.58, 6.25, 5.48, 4.63, 2.27, 2.17],
[6.59, 6.20, 6.73, 5.10, 5.05, 2.09, 1.84],
[6.59, 5.70, 6.91, 5.39, 4.68, 2.55, 1.83],
[6.64, 5.20, 7.01, 5.30, 5.02, 2.22, 2.21],
[6.38, 5.37, 7.36, 5.04, 4.84, 2.59, 2.00],
[6.10, 5.40, 7.72, 5.51, 4.60, 2.59, 1.76],
[6.35, 5.22, 7.68, 5.43, 4.66, 2.95, 1.27],
[6.52, 5.38, 7.62, 5.23, 4.41, 2.69, 1.40],
[6.87, 5.53, 7.74, 4.99, 4.87, 2.20, 1.11],
[6.84, 6.03, 7.53, 5.43, 4.42, 2.69, 1.60],
[7.09, 5.77, 7.46, 5.40, 4.08, 2.65, 1.23],
[6.88, 5.66, 7.84, 5.60, 4.16, 2.63, 1.59],
[6.84, 6.08, 8.11, 5.66, 4.10, 2.14, 1.50],
[6.98, 5.62, 8.04, 6.01, 4.43, 2.39, 1.80],
[7.02, 5.63, 7.65, 5.64, 4.07, 1.95, 1.55],
[7.13, 6.11, 7.52, 5.67, 3.97, 2.32, 1.35],
[7.59, 6.03, 7.67, 5.30, 4.16, 2.69, 1.51],
[7.61, 6.27, 7.47, 4.91, 4.12, 2.51, 1.08],
[7.21, 6.28, 7.44, 5.37, 4.04, 2.62, 1.06],
[7.48, 6.52, 7.59, 5.75, 3.84, 2.16, 1.43],
[7.66, 7.00, 7.94, 6.08, 3.46, 2.35, 1.43],
[7.51, 7.34, 8.25, 6.58, 3.18, 2.31, 1.74],
[7.12, 7.34, 7.77, 6.78, 3.10, 1.96, 1.44],
[6.97, 7.68, 8.03, 7.20, 3.55, 2.35, 1.83],
[6.67, 8.09, 7.87, 7.65, 3.66, 2.58, 1.71],
[6.20, 7.68, 7.58, 8.00, 3.66, 2.40, 2.12],
[6.34, 7.58, 7.33, 7.92, 3.29, 2.20, 2.45],
[6.22, 7.46, 7.22, 8.30, 2.80, 2.31, 2.85],
[5.98, 7.59, 6.86, 8.46, 2.88, 2.16, 2.79],
[6.37, 7.19, 7.18, 7.99, 3.04, 2.16, 2.91],
[6.56, 7.40, 7.54, 8.19, 3.45, 2.20, 3.26],
[6.26, 7.48, 7.24, 8.61, 3.88, 1.73, 3.14],
[6.69, 7.93, 6.85, 8.66, 3.58, 1.93, 3.53],
[7.13, 8.23, 6.60, 8.91, 3.60, 2.25, 3.65],
[6.83, 8.35, 6.65, 9.08, 3.97, 2.69, 3.69],
[7.31, 8.44, 6.74, 9.34, 4.05, 2.59, 3.50],
[7.43, 8.35, 7.19, 8.96, 4.40, 2.14, 3.25],
[7.54, 8.58, 7.14, 8.98, 4.06, 1.68, 3.64],
[7.18, 8.82, 6.88, 8.50, 3.60, 1.98, 4.00],
[7.21, 9.09, 7.14, 8.65, 3.61, 2.14, 3.63],
[7.45, 9.02, 7.30, 8.94, 4.10, 1.89, 3.78],
[7.37, 8.87, 6.95, 8.63, 3.74, 1.97, 3.42],
[6.88, 9.22, 7.02, 8.65, 4.02, 1.99, 3.76],
[7.08, 9.04, 7.38, 8.40, 3.95, 2.37, 3.62],
[6.75, 8.60, 7.50, 8.38, 3.81, 2.14, 3.67],
[6.60, 8.48, 7.60, 8.23, 3.71, 2.35, 3.61],
[6.21, 8.71, 7.15, 8.04, 3.94, 1.86, 3.39],
[6.36, 8.79, 7.30, 7.91, 4.43, 2.14, 3.43],
[6.82, 8.93, 7.80, 7.57, 4.07, 2.39, 3.33],
[6.45, 9.36, 8.15, 7.73, 4.04, 2.53, 3.03],
[6.85, 9.68, 8.40, 7.74, 4.34, 2.47, 3.28],
[6.48, 10.16, 8.87, 8.07, 4.80, 2.93, 3.46],
[6.10, 10.56, 8.53, 7.99, 5.18, 3.09, 3.25],
[5.64, 10.63, 8.94, 7.92, 4.90, 2.93, 2.95],
[6.01, 10.55, 8.52, 8.40, 5.40, 3.22, 2.87],
[6.21, 10.65, 8.80, 8.80, 5.73, 3.06, 2.63],
[6.61, 10.55, 8.92, 8.47, 5.62, 2.90, 2.40],
[7.02, 10.19, 9.20, 8.07, 5.20, 2.68, 2.53],
[7.04, 10.48, 8.71, 7.87, 4.85, 2.46, 2.96],
[6.77, 10.36, 8.25, 8.02, 5.18, 2.41, 3.26],
[7.09, 10.03, 8.19, 8.39, 4.72, 2.74, 2.97],
[6.65, 10.24, 7.80, 8.69, 4.62, 3.15, 3.16],
[7.07, 10.01, 7.69, 8.81, 4.55, 3.40, 3.58],
[6.80, 10.14, 7.23, 8.99, 4.37, 3.82, 3.23],
[6.79, 10.31, 6.98, 9.10, 4.26, 4.02, 3.62],
[6.48, 9.88, 7.07, 8.90, 4.25, 3.76, 3.13],
[6.39, 10.05, 6.95, 8.87, 4.59, 4.10, 2.93]])
self.op_signals = np.array([[0, 0, 0, 0, 0.25, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0.1, 0.15],
[0.2, 0.2, 0, 0, 0, 0, 0],
[0, 0, 0.1, 0, 0, 0, 0],
[0, 0, 0, 0, -0.75, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[-0.333, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, -0.5, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -1],
[0, 0, 0, 0, 0.2, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[-0.5, 0, 0, 0.15, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0.2, 0, -1, 0.2, 0],
[0.5, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0.2, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, -0.5, 0.2],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0.2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0.15, 0, 0],
[-1, 0, 0.25, 0.25, 0, 0.25, 0],
[0, 0, 0, 0, 0, 0, 0],
[0.25, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0.2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -1, 0, 0.2],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, -1, 0, 0, 0, 0, 0],
[-1, 0, 0.15, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
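# Signal convention used throughout this test (consistent with self.res and
# the assertions below): a positive entry appears to buy that share with the
# given fraction of total portfolio value (limited by available cash), a
# negative entry sells that fraction of the current holding, and -1 closes
# the position entirely.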
self.cash = qt.CashPlan(['2016/07/01', '2016/08/12', '2016/09/23'], [10000, 10000, 10000])
self.rate = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=0,
sell_min=0,
slipage=0)
self.rate2 = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=10,
sell_min=5,
slipage=0)
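# self.rate models completely cost-free trading; self.rate2 keeps zero rates
# but sets buy_min=10 / sell_min=5 (presumably minimum per-trade charges) and
# is used for the minimum-quantity (moq) run at the end of test_loop.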
self.op_signal_df = pd.DataFrame(self.op_signals, index=self.dates, columns=self.shares)
self.history_list = pd.DataFrame(self.prices, index=self.dates, columns=self.shares)
self.res = np.array([[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 205.065, 321.089, 5059.722, 0.000, 9761.111],
[346.982, 416.679, 0.000, 0.000, 555.556, 205.065, 321.089, 1201.278, 0.000, 9646.112],
[346.982, 416.679, 191.037, 0.000, 555.556, 205.065, 321.089, 232.719, 0.000, 9685.586],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9813.218],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9803.129],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9608.020],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9311.573],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8883.625],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8751.390],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8794.181],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9136.570],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9209.359],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9093.829],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9387.554],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9585.959],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 9928.777],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10060.381],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10281.002],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10095.561],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 0.000, 4506.393, 0.000, 10029.957],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9875.613],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9614.946],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9824.172],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9732.574],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9968.339],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 10056.158],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9921.492],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9894.162],
[115.719, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 6179.774, 0.000, 20067.937],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21133.508],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20988.848],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20596.743],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 19910.773],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20776.707],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20051.797],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20725.388],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20828.880],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21647.181],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21310.169],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20852.099],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21912.395],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21937.828],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21962.458],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21389.402],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22027.453],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 20939.999],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21250.064],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22282.781],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21407.066],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21160.237],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21826.768],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22744.940],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23466.118],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22017.882],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23191.466],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23099.082],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22684.767],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22842.135],
[1073.823, 416.679, 735.644, 269.850, 1785.205, 938.697, 1339.207, 5001.425, 0, 33323.836],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 32820.290],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 33174.614],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35179.466],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 34465.195],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 34712.354],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35755.550],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37895.223],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37854.284],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37198.374],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35916.711],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35806.937],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 36317.592],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37103.973],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35457.883],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 36717.685],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37641.463],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 36794.298],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37073.817],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35244.299],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37062.382],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37420.067],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 38089.058],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 39260.542],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 42609.684],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 43109.309],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 42283.408],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 43622.444],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 42830.254],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 41266.463],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 41164.839],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 41797.937],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 42440.861],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 42113.839],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 43853.588],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 46216.760],
[0.000, 0.000, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 5140.743, 0.000, 45408.737],
[0.000, 0.000, 2027.188, 719.924, 0.000, 2701.488, 4379.099, 0.000, 0.000, 47413.401],
[0.000, 0.000, 2027.188, 719.924, 0.000, 2701.488, 4379.099, 0.000, 0.000, 44603.718],
[0.000, 0.000, 2027.188, 719.924, 0.000, 2701.488, 4379.099, 0.000, 0.000, 44381.544]])
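# Each row of self.res is the expected end-of-day state: the first seven
# columns appear to be the holdings of share1..share7, followed by cash,
# transaction fee and total portfolio value (compare the cash/amounts/fee/
# value unpacking in test_loop_step below).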
def test_loop_step(self):
cash, amounts, fee, value = qt.core._loop_step(pre_cash=10000,
pre_amounts=np.zeros(7, dtype='float'),
op=self.op_signals[0],
prices=self.prices[0],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
self.assertAlmostEqual(value, 10000.00)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=5059.722222,
pre_amounts=np.array([0, 0, 0, 0, 555.5555556,
205.0653595, 321.0891813]),
op=self.op_signals[3],
prices=self.prices[3],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 1201.2775195, 5)
self.assertTrue(np.allclose(amounts, np.array([346.9824373, 416.6786936, 0, 0,
555.5555556, 205.0653595, 321.0891813])))
self.assertAlmostEqual(value, 9646.111756, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=6179.77423,
pre_amounts=np.array([115.7186428, 416.6786936, 735.6441811,
269.8495646, 0, 1877.393446, 0]),
op=self.op_signals[31],
prices=self.prices[31],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 0, 5)
self.assertTrue(np.allclose(amounts, np.array([1073.823175, 416.6786936, 735.6441811,
269.8495646, 0, 1877.393446, 0])))
self.assertAlmostEqual(value, 21133.50798, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=10000,
pre_amounts=np.array([1073.823175, 416.6786936, 735.6441811,
269.8495646, 0, 938.6967231, 1339.207325]),
op=self.op_signals[60],
prices=self.prices[60],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 5001.424618, 5)
self.assertTrue(np.allclose(amounts, np.array([1073.823175, 416.6786936, 735.6441811, 269.8495646,
1785.205494, 938.6967231, 1339.207325])))
self.assertAlmostEqual(value, 33323.83588, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=cash,
pre_amounts=amounts,
op=self.op_signals[61],
prices=self.prices[61],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 0, 5)
self.assertTrue(np.allclose(amounts, np.array([0, 416.6786936, 1290.69215, 719.9239224,
1785.205494, 2701.487958, 1339.207325])))
self.assertAlmostEqual(value, 32820.29007, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=915.6208259,
pre_amounts=np.array([0, 416.6786936, 1290.69215, 719.9239224,
0, 2701.487958, 4379.098907]),
op=self.op_signals[96],
prices=self.prices[96],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 5140.742779, 5)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 1290.69215, 719.9239224, 0, 2701.487958, 4379.098907])))
self.assertAlmostEqual(value, 45408.73655, 4)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=cash,
pre_amounts=amounts,
op=self.op_signals[97],
prices=self.prices[97],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 0, 5)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 2027.18825, 719.9239224, 0, 2701.487958, 4379.098907])))
self.assertAlmostEqual(value, 47413.40131, 4)
def test_loop(self):
res = apply_loop(op_list=self.op_signal_df,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res.values, self.res, 5))
print(f'test assertion errors in apply_loop: detect moq values that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
self.op_signal_df,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
self.op_signal_df,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq_buy=100 and moq_sell=1')
res = apply_loop(op_list=self.op_signal_df,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
class TestOperatorSubFuncs(unittest.TestCase):
def setUp(self):
mask_list = [[0.0, 0.0, 0.0, 0.0],
[0.5, 0.0, 0.0, 1.0],
[0.5, 0.0, 0.3, 1.0],
[0.5, 0.0, 0.3, 0.5],
[0.5, 0.5, 0.3, 0.5],
[0.5, 0.5, 0.3, 1.0],
[0.3, 0.5, 0.0, 1.0],
[0.3, 1.0, 0.0, 1.0]]
signal_list = [[0.0, 0.0, 0.0, 0.0],
[0.5, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.3, 0.0],
[0.0, 0.0, 0.0, -0.5],
[0.0, 0.5, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.5],
[-0.4, 0.0, -1.0, 0.0],
[0.0, 0.5, 0.0, 0.0]]
mask_multi = [[[0, 0, 1, 1, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 0, 1, 1],
[0, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
[1, 0, 0, 0, 1]],
[[0, 0, 1, 0, 1],
[0, 1, 1, 1, 1],
[1, 1, 0, 1, 1],
[1, 1, 1, 0, 0],
[1, 1, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 1, 0, 0, 0],
[1, 1, 0, 1, 0],
[0, 1, 0, 1, 0]],
[[0, 0, 0., 0, 1],
[0, 0, 1., 0, 1],
[0, 0, 1., 0, 1],
[1, 0, 1., 0, 1],
[1, 1, .5, 1, 1],
[1, 0, .5, 1, 0],
[1, 1, .5, 1, 0],
[0, 1, 0., 0, 0],
[1, 0, 0., 0, 0],
[0, 1, 0., 0, 0]]]
signal_multi = [[[0., 0., 1., 1., 0.],
[0., 1., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0.],
[0., 0., -1., 0., 0.],
[-1., 0., 0., -1., 0.],
[1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., -1., 0., 0., 0.]],
[[0., 0., 1., 0., 1.],
[0., 1., 0., 1., 0.],
[1., 0., -1., 0., 0.],
[0., 0., 1., -1., -1.],
[0., 0., -1., 0., 0.],
[0., -1., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0.],
[0., 0., 0., 1., 0.],
[-1., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 1.],
[0., 0., 1., 0., 0.],
[0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[0., 1., -0.5, 1., 0.],
[0., -1., 0., 0., -1.],
[0., 1., 0., 0., 0.],
[-1., 0., -1., -1., 0.],
[1., -1., 0., 0., 0.],
[-1., 1., 0., 0., 0.]]]
self.mask = np.array(mask_list)
self.multi_mask = np.array(mask_multi)
self.correct_signal = np.array(signal_list)
self.correct_multi_signal = np.array(signal_multi)
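# Note (added for clarity): judging from the fixtures above, mask_to_signal appears to report
# position increases as the raw difference between consecutive mask rows (e.g. 0.0 -> 0.5 gives
# a buy signal of 0.5), while position decreases are reported as the fraction of the existing
# holding to sell (e.g. 0.5 -> 0.3 gives -0.4, and 0.3 -> 0.0 gives -1.0). This is an
# observation about the test data, not a statement of the library's documented contract.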
self.op = qt.Operator()
def test_ls_blend(self):
"""测试多空蒙板的混合器,三种混合方式均需要测试"""
ls_mask1 = [[0.0, 0.0, 0.0, -0.0],
[1.0, 0.0, 0.0, -1.0],
[1.0, 0.0, 1.0, -1.0],
[1.0, 0.0, 1.0, -1.0],
[1.0, 1.0, 1.0, -1.0],
[1.0, 1.0, 1.0, -1.0],
[0.0, 1.0, 0.0, -1.0],
[0.0, 1.0, 0.0, -1.0]]
ls_mask2 = [[0.0, 0.0, 0.5, -0.5],
[0.0, 0.0, 0.5, -0.3],
[0.0, 0.5, 0.5, -0.0],
[0.5, 0.5, 0.3, -0.0],
[0.5, 0.5, 0.3, -0.3],
[0.5, 0.5, 0.0, -0.5],
[0.3, 0.5, 0.0, -1.0],
[0.3, 1.0, 0.0, -1.0]]
ls_mask3 = [[0.5, 0.0, 1.0, -0.4],
[0.4, 0.0, 1.0, -0.3],
[0.3, 0.0, 0.8, -0.2],
[0.2, 0.0, 0.6, -0.1],
[0.1, 0.2, 0.4, -0.2],
[0.1, 0.3, 0.2, -0.5],
[0.1, 0.4, 0.0, -0.5],
[0.1, 0.5, 0.0, -1.0]]
# result with blender 'avg'
ls_blnd_avg = [[0.16666667, 0.00000000, 0.50000000, -0.3],
[0.46666667, 0.00000000, 0.50000000, -0.53333333],
[0.43333333, 0.16666667, 0.76666667, -0.4],
[0.56666667, 0.16666667, 0.63333333, -0.36666667],
[0.53333333, 0.56666667, 0.56666667, -0.5],
[0.53333333, 0.60000000, 0.40000000, -0.66666667],
[0.13333333, 0.63333333, 0.00000000, -0.83333333],
[0.13333333, 0.83333333, 0.00000000, -1.]]
# result with blender 'str-1.5'
ls_blnd_str_15 = [[0, 0, 1, 0],
[0, 0, 1, -1],
[0, 0, 1, 0],
[1, 0, 1, 0],
[1, 1, 1, -1],
[1, 1, 0, -1],
[0, 1, 0, -1],
[0, 1, 0, -1]]
# result with blender 'pos-2' == 'pos-2-0'
ls_blnd_pos_2 = [[0, 0, 1, -1],
[1, 0, 1, -1],
[1, 0, 1, -1],
[1, 0, 1, -1],
[1, 1, 1, -1],
[1, 1, 1, -1],
[1, 1, 0, -1],
[1, 1, 0, -1]]
# result with blender 'pos-2-0.25'
ls_blnd_pos_2_25 = [[0, 0, 1, -1],
[1, 0, 1, -1],
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 1, 1, -1],
[1, 1, 0, -1],
[0, 1, 0, -1],
[0, 1, 0, -1]]
# result with blender 'avg_pos-2' == 'avg_pos-2-0'
ls_blnd_avg_pos_2 = [[0.00000000, 0.00000000, 0.50000000, -0.3],
[0.46666667, 0.00000000, 0.50000000, -0.53333333],
[0.43333333, 0.00000000, 0.76666667, -0.4],
[0.56666667, 0.00000000, 0.63333333, -0.36666667],
[0.53333333, 0.56666667, 0.56666667, -0.5],
[0.53333333, 0.60000000, 0.40000000, -0.66666667],
[0.13333333, 0.63333333, 0.00000000, -0.83333333],
[0.13333333, 0.83333333, 0.00000000, -1.]]
# result with blender 'avg_pos-2-0.25'
ls_blnd_avg_pos_2_25 = [[0.00000000, 0.00000000, 0.50000000, -0.3],
[0.46666667, 0.00000000, 0.50000000, -0.53333333],
[0.43333333, 0.00000000, 0.76666667, 0.00000000],
[0.56666667, 0.00000000, 0.63333333, 0.00000000],
[0.53333333, 0.56666667, 0.56666667, -0.5],
[0.53333333, 0.60000000, 0.00000000, -0.66666667],
[0.00000000, 0.63333333, 0.00000000, -0.83333333],
[0.00000000, 0.83333333, 0.00000000, -1.]]
# result with blender 'combo'
ls_blnd_combo = [[0.5, 0., 1.5, -0.9],
[1.4, 0., 1.5, -1.6],
[1.3, 0.5, 2.3, -1.2],
[1.7, 0.5, 1.9, -1.1],
[1.6, 1.7, 1.7, -1.5],
[1.6, 1.8, 1.2, -2.],
[0.4, 1.9, 0., -2.5],
[0.4, 2.5, 0., -3.]]
ls_masks = np.array([np.array(ls_mask1), np.array(ls_mask2), np.array(ls_mask3)])
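# The expected results above suggest the following semantics for the ls blenders (an observation
# derived from the fixtures, not from the library documentation):
# 'combo' - element-wise sum of all masks
# 'avg' - element-wise mean of all masks
# 'str-T' - sum of all masks thresholded at T: +1 where sum >= T, -1 where sum <= -T, else 0
# 'pos-N-T' - unit long/short position wherever at least N masks have abs value > T, else 0
# 'avg_pos-N-T' - like 'avg', but zeroed wherever fewer than N masks have abs value > T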
# test A: the ls_blender 'str-T'
self.op.set_blender('ls', 'str-1.5')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'test A: result of ls_blender: str-1.5: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_str_15))
# test B: the ls_blender 'pos-N-T'
self.op.set_blender('ls', 'pos-2')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test B-1: result of ls_blender: pos-2: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_pos_2))
self.op.set_blender('ls', 'pos-2-0.25')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test B-2: result of ls_blender: pos-2-0.25: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_pos_2_25))
# test C: the ls_blender 'avg_pos-N-T'
self.op.set_blender('ls', 'avg_pos-2')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test C-1: result of ls_blender: avg_pos-2: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_pos_2, 5))
self.op.set_blender('ls', 'avg_pos-2-0.25')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test C-2: result of ls_blender: avg_pos-2-0.25: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_pos_2_25, 5))
# test D: the ls_blender 'avg'
self.op.set_blender('ls', 'avg')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test D: result of ls_blender: avg: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_avg))
# test E: the ls_blender 'combo'
self.op.set_blender('ls', 'combo')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test E: result of ls_blender: combo: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_combo))
def test_sel_blend(self):
"""测试选股蒙板的混合器,包括所有的混合模式"""
# step2, test blending of sel masks
pass
def test_bs_blend(self):
"""测试买卖信号混合模式"""
# step3, test blending of op signals
pass
def test_unify(self):
print('Testing Unify functions\n')
l1 = np.array([[3, 2, 5], [5, 3, 2]])
res = qt.unify(l1)
target = np.array([[0.3, 0.2, 0.5], [0.5, 0.3, 0.2]])
self.assertIs(np.allclose(res, target), True, 'sum of all elements is 1')
l1 = np.array([[1, 1, 1, 1, 1], [2, 2, 2, 2, 2]])
res = qt.unify(l1)
target = np.array([[0.2, 0.2, 0.2, 0.2, 0.2], [0.2, 0.2, 0.2, 0.2, 0.2]])
self.assertIs(np.allclose(res, target), True, 'sum of all elements is 1')
def test_mask_to_signal(self):
signal = qt.mask_to_signal(self.mask)
print(f'Test A: single mask to signal, result: \n{signal}')
self.assertTrue(np.allclose(signal, self.correct_signal))
signal = qt.mask_to_signal(self.multi_mask)
print(f'Test A: single mask to signal, result: \n{signal}')
self.assertTrue(np.allclose(signal, self.correct_multi_signal))
class TestLSStrategy(qt.RollingTiming):
"""用于test测试的简单多空蒙板生成策略。基于RollingTiming滚动择时方法生成
该策略有两个参数,N与Price
N用于计算OHLC价格平均值的N日简单移动平均,判断,当移动平均值大于等于Price时,状态为看多,否则为看空
"""
def __init__(self):
super().__init__(stg_name='test_LS',
stg_text='test long/short strategy',
par_count=2,
par_types='discr, conti',
par_bounds_or_enums=([1, 5], [2, 10]),
data_types='close, open, high, low',
data_freq='d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
n, price = params
h = hist_data.T
avg = (h[0] + h[1] + h[2] + h[3]) / 4
ma = sma(avg, n)
if ma[-1] < price:
return 0
else:
return 1
class TestSelStrategy(qt.SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='high, low, close',
data_freq='d',
sample_freq='10d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
avg = np.nanmean(hist_data, axis=(1, 2))
dif = (hist_data[:, :, 2] - np.roll(hist_data[:, :, 2], 1, 1))
dif_no_nan = np.array([arr[~np.isnan(arr)][-1] for arr in dif])
difper = dif_no_nan / avg
large2 = difper.argsort()[1:]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSelStrategyDiffTime(qt.SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='close, low, open',
data_freq='d',
sample_freq='w',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
avg = hist_data.mean(axis=1).squeeze()
difper = (hist_data[:, :, 0] - np.roll(hist_data[:, :, 0], 1))[:, -1] / avg
large2 = difper.argsort()[0:2]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSigStrategy(qt.SimpleTiming):
"""用于Test测试的简单信号生成策略,基于SimpleTiming策略生成
策略有三个参数,第一个参数为ratio,另外两个参数为price1以及price2
ratio是k线形状比例的阈值,定义为abs((C-O)/(H-L))。当这个比值小于ratio阈值时,判断该K线为十字交叉(其实还有丁字等多种情形,但这里做了
简化处理。
信号生成的规则如下:
1,当某个K线出现十字交叉,且昨收与今收之差大于price1时,买入信号
2,当某个K线出现十字交叉,且昨收与今收之差小于price2时,卖出信号
"""
def __init__(self):
super().__init__(stg_name='test_SIG',
stg_text='test signal creation strategy',
par_count=3,
par_types='conti, conti, conti',
par_bounds_or_enums=([2, 10], [0, 3], [0, 3]),
data_types='close, open, high, low',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
r, price1, price2 = params
h = hist_data.T
ratio = np.abs((h[0] - h[1]) / (h[3] - h[2]))
diff = h[0] - np.roll(h[0], 1)
sig = np.where((ratio < r) & (diff > price1),
1,
np.where((ratio < r) & (diff < price2), -1, 0))
return sig
class TestOperator(unittest.TestCase):
"""全面测试Operator对象的所有功能。包括:
1, Strategy 参数的设置
2, 历史数据的获取与分配提取
3, 策略优化参数的批量设置和优化空间的获取
4, 策略输出值的正确性验证
5, 策略结果的混合结果确认
"""
def setUp(self):
"""prepare data for Operator test"""
print('start testing Operator object\n')
# build up test data: a 4-type, 3-share, 50-day matrix of prices that contains nan values
# on some days for some of the shares
# for share1:
data_rows = 50
share1_close = [10.04, 10, 10, 9.99, 9.97, 9.99, 10.03, 10.03, 10.06, 10.06, 10.11,
10.09, 10.07, 10.06, 10.09, 10.03, 10.03, 10.06, 10.08, 10, 9.99,
10.03, 10.03, 10.06, 10.03, 9.97, 9.94, 9.83, 9.77, 9.84, 9.91, 9.93,
9.96, 9.91, 9.91, 9.88, 9.91, 9.64, 9.56, 9.57, 9.55, 9.57, 9.61, 9.61,
9.55, 9.57, 9.63, 9.64, 9.65, 9.62]
share1_open = [10.02, 10, 9.98, 9.97, 9.99, 10.01, 10.04, 10.06, 10.06, 10.11,
10.11, 10.07, 10.06, 10.09, 10.03, 10.02, 10.06, 10.08, 9.99, 10,
10.03, 10.02, 10.06, 10.03, 9.97, 9.94, 9.83, 9.78, 9.77, 9.91, 9.92,
9.97, 9.91, 9.9, 9.88, 9.91, 9.63, 9.64, 9.57, 9.55, 9.58, 9.61, 9.62,
9.55, 9.57, 9.61, 9.63, 9.64, 9.61, 9.56]
share1_high = [10.07, 10, 10, 10, 10.03, 10.03, 10.04, 10.09, 10.1, 10.14, 10.11, 10.1,
10.09, 10.09, 10.1, 10.05, 10.07, 10.09, 10.1, 10, 10.04, 10.04, 10.06,
10.09, 10.05, 9.97, 9.96, 9.86, 9.77, 9.92, 9.94, 9.97, 9.97, 9.92, 9.92,
9.92, 9.93, 9.64, 9.58, 9.6, 9.58, 9.62, 9.62, 9.64, 9.59, 9.62, 9.63,
9.7, 9.66, 9.64]
share1_low = [9.99, 10, 9.97, 9.97, 9.97, 9.98, 9.99, 10.03, 10.03, 10.04, 10.11, 10.07,
10.05, 10.03, 10.03, 10.01, 9.99, 10.03, 9.95, 10, 9.95, 10, 10.01, 9.99,
9.96, 9.89, 9.83, 9.77, 9.77, 9.8, 9.9, 9.91, 9.89, 9.89, 9.87, 9.85, 9.6,
9.64, 9.53, 9.55, 9.54, 9.55, 9.58, 9.54, 9.53, 9.53, 9.63, 9.64, 9.59, 9.56]
# for share2:
share2_close = [9.68, 9.87, 9.86, 9.87, 9.79, 9.82, 9.8, 9.66, 9.62, 9.58, 9.69, 9.78, 9.75,
9.96, 9.9, 10.04, 10.06, 10.08, 10.24, 10.24, 10.24, 9.86, 10.13, 10.12,
10.1, 10.25, 10.24, 10.22, 10.75, 10.64, 10.56, 10.6, 10.42, 10.25, 10.24,
10.49, 10.57, 10.63, 10.48, 10.37, 10.96, 11.02, np.nan, np.nan, 10.88, 10.87, 11.01,
11.01, 11.58, 11.8]
share2_open = [9.88, 9.88, 9.89, 9.75, 9.74, 9.8, 9.62, 9.65, 9.58, 9.67, 9.81, 9.8, 10,
9.95, 10.1, 10.06, 10.14, 9.9, 10.2, 10.29, 9.86, 9.48, 10.01, 10.24, 10.26,
10.24, 10.12, 10.65, 10.64, 10.56, 10.42, 10.43, 10.29, 10.3, 10.44, 10.6,
10.67, 10.46, 10.39, 10.9, 11.01, 11.01, np.nan, np.nan, 10.82, 11.02, 10.96,
11.55, 11.74, 11.8]
share2_high = [9.91, 10.04, 9.93, 10.04, 9.84, 9.88, 9.99, 9.7, 9.67, 9.71, 9.85, 9.9, 10,
10.2, 10.11, 10.18, 10.21, 10.26, 10.38, 10.47, 10.42, 10.07, 10.24, 10.27,
10.38, 10.43, 10.39, 10.65, 10.84, 10.65, 10.73, 10.63, 10.51, 10.35, 10.46,
10.63, 10.74, 10.76, 10.54, 11.02, 11.12, 11.17, np.nan, np.nan, 10.92, 11.15,
11.11, 11.55, 11.95, 11.93]
share2_low = [9.63, 9.84, 9.81, 9.74, 9.67, 9.72, 9.57, 9.54, 9.51, 9.47, 9.68, 9.63, 9.75,
9.65, 9.9, 9.93, 10.03, 9.8, 10.14, 10.09, 9.78, 9.21, 9.11, 9.68, 10.05,
10.12, 9.89, 9.89, 10.59, 10.43, 10.34, 10.32, 10.21, 10.2, 10.18, 10.36,
10.51, 10.41, 10.32, 10.37, 10.87, 10.95, np.nan, np.nan, 10.65, 10.71, 10.75,
10.91, 11.31, 11.58]
# for share3:
share3_close = [6.64, 7.26, 7.03, 6.87, np.nan, 6.64, 6.85, 6.7, 6.39, 6.22, 5.92, 5.91, 6.11,
5.91, 6.23, 6.28, 6.28, 6.27, np.nan, 5.56, 5.67, 5.16, 5.69, 6.32, 6.14, 6.25,
5.79, 5.26, 5.05, 5.45, 6.06, 6.21, 5.69, 5.46, 6.02, 6.69, 7.43, 7.72, 8.16,
7.83, 8.7, 8.71, 8.88, 8.54, 8.87, 8.87, 8.18, 7.8, 7.97, 8.25]
share3_open = [7.26, 7, 6.88, 6.91, np.nan, 6.81, 6.63, 6.45, 6.16, 6.24, 5.96, 5.97, 5.96,
6.2, 6.35, 6.11, 6.37, 5.58, np.nan, 5.65, 5.19, 5.42, 6.3, 6.15, 6.05, 5.89,
5.22, 5.2, 5.07, 6.04, 6.12, 5.85, 5.67, 6.02, 6.04, 7.07, 7.64, 7.99, 7.59,
8.73, 8.72, 8.97, 8.58, 8.71, 8.77, 8.4, 7.95, 7.76, 8.25, 7.51]
share3_high = [7.41, 7.31, 7.14, 7, np.nan, 6.82, 6.96, 6.85, 6.5, 6.34, 6.04, 6.02, 6.12, 6.38,
6.43, 6.46, 6.43, 6.27, np.nan, 6.01, 5.67, 5.67, 6.35, 6.32, 6.43, 6.36, 5.79,
5.47, 5.65, 6.04, 6.14, 6.23, 5.83, 6.25, 6.27, 7.12, 7.82, 8.14, 8.27, 8.92,
8.76, 9.15, 8.9, 9.01, 9.16, 9, 8.27, 7.99, 8.33, 8.25]
share3_low = [6.53, 6.87, 6.83, 6.7, np.nan, 6.63, 6.57, 6.41, 6.15, 6.07, 5.89, 5.82, 5.73, 5.81,
6.1, 6.06, 6.16, 5.57, np.nan, 5.51, 5.19, 5.12, 5.69, 6.01, 5.97, 5.86, 5.18, 5.19,
4.96, 5.45, 5.84, 5.85, 5.28, 5.42, 6.02, 6.69, 7.28, 7.64, 7.25, 7.83, 8.41, 8.66,
8.53, 8.54, 8.73, 8.27, 7.95, 7.67, 7.8, 7.51]
# for sel_finance test
shares_eps = np.array([[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, 0.2, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[0.1, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.3, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.15, np.nan, np.nan],
[np.nan, 0.1, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.2, np.nan, np.nan],
[np.nan, 0.5, np.nan],
[0.4, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[0.9, np.nan, np.nan],
[np.nan, np.nan, 0.1]])
self.date_indices = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14', '2016-07-15', '2016-07-18',
'2016-07-19', '2016-07-20', '2016-07-21', '2016-07-22',
'2016-07-25', '2016-07-26', '2016-07-27', '2016-07-28',
'2016-07-29', '2016-08-01', '2016-08-02', '2016-08-03',
'2016-08-04', '2016-08-05', '2016-08-08', '2016-08-09',
'2016-08-10', '2016-08-11', '2016-08-12', '2016-08-15',
'2016-08-16', '2016-08-17', '2016-08-18', '2016-08-19',
'2016-08-22', '2016-08-23', '2016-08-24', '2016-08-25',
'2016-08-26', '2016-08-29', '2016-08-30', '2016-08-31',
'2016-09-01', '2016-09-02', '2016-09-05', '2016-09-06',
'2016-09-07', '2016-09-08']
self.shares = ['000010', '000030', '000039']
self.types = ['close', 'open', 'high', 'low']
self.sel_finance_tyeps = ['eps']
self.test_data_3D = np.zeros((3, data_rows, 4))
self.test_data_2D = np.zeros((data_rows, 3))
self.test_data_2D2 = np.zeros((data_rows, 4))
self.test_data_sel_finance = np.empty((3, data_rows, 1))
# Build up 3D data
self.test_data_3D[0, :, 0] = share1_close
self.test_data_3D[0, :, 1] = share1_open
self.test_data_3D[0, :, 2] = share1_high
self.test_data_3D[0, :, 3] = share1_low
self.test_data_3D[1, :, 0] = share2_close
self.test_data_3D[1, :, 1] = share2_open
self.test_data_3D[1, :, 2] = share2_high
self.test_data_3D[1, :, 3] = share2_low
self.test_data_3D[2, :, 0] = share3_close
self.test_data_3D[2, :, 1] = share3_open
self.test_data_3D[2, :, 2] = share3_high
self.test_data_3D[2, :, 3] = share3_low
self.test_data_sel_finance[:, :, 0] = shares_eps.T
self.hp1 = qt.HistoryPanel(values=self.test_data_3D,
levels=self.shares,
columns=self.types,
rows=self.date_indices)
print(f'in test Operator, history panel is created for timing test')
self.hp1.info()
self.hp2 = qt.HistoryPanel(values=self.test_data_sel_finance,
levels=self.shares,
columns=self.sel_finance_tyeps,
rows=self.date_indices)
print(f'in test_Operator, history panel is created for selection finance test:')
self.hp2.info()
self.op = qt.Operator(selecting_types=['all'], timing_types='dma', ricon_types='urgent')
def test_info(self):
"""Test information output of Operator"""
print(f'test printing information of operator object')
# self.op.info()
def test_operator_ready(self):
"""test the method ready of Operator"""
pass
# print(f'operator is ready? "{self.op.ready}"')
def test_operator_add_strategy(self):
"""test adding strategies to Operator"""
pass
# self.assertIsInstance(self.op, qt.Operator)
# self.assertIsInstance(self.op.timing[0], qt.TimingDMA)
# self.assertIsInstance(self.op.selecting[0], qt.SelectingAll)
# self.assertIsInstance(self.op.ricon[0], qt.RiconUrgent)
# self.assertEqual(self.op.selecting_count, 1)
# self.assertEqual(self.op.strategy_count, 3)
# self.assertEqual(self.op.ricon_count, 1)
# self.assertEqual(self.op.timing_count, 1)
# self.assertEqual(self.op.ls_blender, 'pos-1')
# print(f'test adding strategies into existing op')
# print('test adding strategy by string')
# self.op.add_strategy('macd', 'timing')
# self.assertIsInstance(self.op.timing[0], qt.TimingDMA)
# self.assertIsInstance(self.op.timing[1], qt.TimingMACD)
# self.assertEqual(self.op.selecting_count, 1)
# self.assertEqual(self.op.strategy_count, 4)
# self.assertEqual(self.op.ricon_count, 1)
# self.assertEqual(self.op.timing_count, 2)
# self.assertEqual(self.op.ls_blender, 'pos-1')
# self.op.add_strategy('random', 'selecting')
# self.assertIsInstance(self.op.selecting[0], qt.TimingDMA)
# self.assertIsInstance(self.op.selecting[1], qt.TimingMACD)
# self.assertEqual(self.op.selecting_count, 2)
# self.assertEqual(self.op.strategy_count, 5)
# self.assertEqual(self.op.ricon_count, 1)
# self.assertEqual(self.op.timing_count, 2)
# self.assertEqual(self.op.selecting_blender, '0 or 1')
# self.op.add_strategy('none', 'ricon')
# self.assertIsInstance(self.op.ricon[0], qt.TimingDMA)
# self.assertIsInstance(self.op.ricon[1], qt.TimingMACD)
# self.assertEqual(self.op.selecting_count, 2)
# self.assertEqual(self.op.strategy_count, 6)
# self.assertEqual(self.op.ricon_count, 2)
# self.assertEqual(self.op.timing_count, 2)
# print('test adding strategy by list')
# self.op.add_strategy(['dma', 'macd'], 'timing')
# print('test adding strategy by object')
# test_ls = TestLSStrategy()
# self.op.add_strategy(test_ls, 'timing')
def test_operator_remove_strategy(self):
"""test removing strategies from Operator"""
pass
# self.op.remove_strategy(stg='macd')
def test_property_get(self):
self.assertIsInstance(self.op, qt.Operator)
self.assertIsInstance(self.op.timing[0], qt.TimingDMA)
self.assertIsInstance(self.op.selecting[0], qt.SelectingAll)
self.assertIsInstance(self.op.ricon[0], qt.RiconUrgent)
self.assertEqual(self.op.selecting_count, 1)
self.assertEqual(self.op.strategy_count, 3)
self.assertEqual(self.op.ricon_count, 1)
self.assertEqual(self.op.timing_count, 1)
print(self.op.strategies, '\n', [qt.TimingDMA, qt.SelectingAll, qt.RiconUrgent])
print(f'info of Timing strategy: \n{self.op.strategies[0].info()}')
self.assertEqual(len(self.op.strategies), 3)
self.assertIsInstance(self.op.strategies[0], qt.TimingDMA)
self.assertIsInstance(self.op.strategies[1], qt.SelectingAll)
self.assertIsInstance(self.op.strategies[2], qt.RiconUrgent)
self.assertEqual(self.op.strategy_count, 3)
self.assertEqual(self.op.op_data_freq, 'd')
self.assertEqual(self.op.op_data_types, ['close'])
self.assertEqual(self.op.opt_space_par, ([], []))
self.assertEqual(self.op.max_window_length, 270)
self.assertEqual(self.op.ls_blender, 'pos-1')
self.assertEqual(self.op.selecting_blender, '0')
self.assertEqual(self.op.ricon_blender, 'add')
self.assertEqual(self.op.opt_types, [0, 0, 0])
def test_prepare_data(self):
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sig = TestSigStrategy()
self.op = qt.Operator(timing_types=[test_ls],
selecting_types=[test_sel],
ricon_types=[test_sig])
too_early_cash = qt.CashPlan(dates='2016-01-01', amounts=10000)
early_cash = qt.CashPlan(dates='2016-07-01', amounts=10000)
on_spot_cash = qt.CashPlan(dates='2016-07-08', amounts=10000)
no_trade_cash = qt.CashPlan(dates='2016-07-08, 2016-07-30, 2016-08-11, 2016-09-03',
amounts=[10000, 10000, 10000, 10000])
late_cash = qt.CashPlan(dates='2016-12-31', amounts=10000)
multi_cash = qt.CashPlan(dates='2016-07-08, 2016-08-08', amounts=[10000, 10000])
self.op.set_parameter(stg_id='t-0',
pars={'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.op.set_parameter(stg_id='s-0',
pars=())
self.op.set_parameter(stg_id='r-0',
pars=(0.2, 0.02, -0.02))
self.op.prepare_data(hist_data=self.hp1,
cash_plan=on_spot_cash)
self.assertIsInstance(self.op._selecting_history_data, list)
self.assertIsInstance(self.op._timing_history_data, list)
self.assertIsInstance(self.op._ricon_history_data, list)
self.assertEqual(len(self.op._selecting_history_data), 1)
self.assertEqual(len(self.op._timing_history_data), 1)
self.assertEqual(len(self.op._ricon_history_data), 1)
sel_hist_data = self.op._selecting_history_data[0]
tim_hist_data = self.op._timing_history_data[0]
ric_hist_data = self.op._ricon_history_data[0]
print(f'in test_prepare_data in TestOperator:')
print('selecting history data:\n', sel_hist_data)
print('originally passed data in correct sequence:\n', self.test_data_3D[:, 3:, [2, 3, 0]])
print('difference is \n', sel_hist_data - self.test_data_3D[:, :, [2, 3, 0]])
self.assertTrue(np.allclose(sel_hist_data, self.test_data_3D[:, :, [2, 3, 0]], equal_nan=True))
self.assertTrue(np.allclose(tim_hist_data, self.test_data_3D, equal_nan=True))
self.assertTrue(np.allclose(ric_hist_data, self.test_data_3D[:, 3:, :], equal_nan=True))
# raises ValueError if an empty history panel is given
empty_hp = qt.HistoryPanel()
correct_hp = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 4)),
columns=self.types,
levels=self.shares,
rows=self.date_indices)
too_many_shares = qt.HistoryPanel(values=np.random.randint(10, size=(5, 50, 4)))
too_many_types = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 5)))
# raises Error when history panel is empty
self.assertRaises(ValueError,
self.op.prepare_data,
empty_hp,
on_spot_cash)
# raises Error when first investment date is too early
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
early_cash)
# raises Error when last investment date is too late
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
late_cash)
# raises Error when some of the investment dates are on no-trade-days
self.assertRaises(ValueError,
self.op.prepare_data,
correct_hp,
no_trade_cash)
# raises Error when number of shares in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_shares,
on_spot_cash)
# raises Error when the cash investment date is too early
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
too_early_cash)
# raises Error when number of d_types in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_types,
on_spot_cash)
# test the effect of data type sequence in strategy definition
def test_operator_generate(self):
"""
:return:
"""
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sig = TestSigStrategy()
self.op = qt.Operator(timing_types=[test_ls],
selecting_types=[test_sel],
ricon_types=[test_sig])
self.assertIsInstance(self.op, qt.Operator, 'Operator Creation Error')
self.op.set_parameter(stg_id='t-0',
pars={'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.op.set_parameter(stg_id='s-0',
pars=())
# Calling prepare_data before all strategy parameters have been set raises an AssertionError
self.assertRaises(AssertionError,
self.op.prepare_data,
hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
self.op.set_parameter(stg_id='r-0',
pars=(0.2, 0.02, -0.02))
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
self.op.info()
op_list = self.op.create_signal(hist_data=self.hp1)
print(f'operation list is created: as following:\n {op_list}')
self.assertTrue(isinstance(op_list, pd.DataFrame))
self.assertEqual(op_list.shape, (26, 3))
# After removing the code that dropped duplicate signals, the number of signals grew from the
# original 23 to 26, including three duplicates; dropping duplicates could also remove signals
# that should be kept. See the comment around line 836 of the create_signal() function in operator.py.
target_op_dates = ['2016/07/08', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/18', '2016/07/20', '2016/07/22', '2016/07/26',
'2016/07/27', '2016/07/28', '2016/08/02', '2016/08/03',
'2016/08/04', '2016/08/05', '2016/08/08', '2016/08/10',
'2016/08/16', '2016/08/18', '2016/08/24', '2016/08/26',
'2016/08/29', '2016/08/30', '2016/08/31', '2016/09/05',
'2016/09/06', '2016/09/08']
target_op_values = np.array([[0.0, 1.0, 0.0],
[0.5, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.5, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, -1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 1.0],
[-1.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0]])
target_op = pd.DataFrame(data=target_op_values, index=target_op_dates, columns=['000010', '000030', '000039'])
target_op = target_op.rename(index=pd.Timestamp)
print(f'target operation list is as following:\n {target_op}')
dates_pairs = [[date1, date2, date1 == date2]
for date1, date2
in zip(target_op.index.strftime('%m-%d'), op_list.index.strftime('%m-%d'))]
signal_pairs = [[list(sig1), list(sig2), all(sig1 == sig2)]
for sig1, sig2
in zip(list(target_op.values), list(op_list.values))]
print(f'dates side by side:\n '
f'{dates_pairs}')
print(f'signals side by side:\n'
f'{signal_pairs}')
print([item[2] for item in dates_pairs])
print([item[2] for item in signal_pairs])
self.assertTrue(np.allclose(target_op.values, op_list.values, equal_nan=True))
self.assertTrue(all([date1 == date2
for date1, date2
in zip(target_op.index.strftime('%m-%d'), op_list.index.strftime('%m-%d'))]))
def test_operator_parameter_setting(self):
"""
:return:
"""
new_op = qt.Operator(selecting_types=['all'], timing_types='dma', ricon_types='urgent')
print(new_op.strategies, '\n', [qt.TimingDMA, qt.SelectingAll, qt.RiconUrgent])
print(f'info of Timing strategy in new op: \n{new_op.strategies[0].info()}')
self.op.set_parameter('t-0',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
self.op.set_parameter(stg_id='s-0',
pars=None,
opt_tag=1,
sample_freq='10d',
window_length=10,
data_types='close, open')
self.op.set_parameter(stg_id='r-0',
pars=None,
opt_tag=0,
sample_freq='d',
window_length=20,
data_types='close, open')
self.assertEqual(self.op.timing[0].pars, (5, 10, 5))
self.assertEqual(self.op.timing[0].par_boes, ((5, 10), (5, 15), (10, 15)))
self.assertEqual(self.op.op_data_freq, 'd')
self.assertEqual(self.op.op_data_types, ['close', 'high', 'open'])
self.assertEqual(self.op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (0, 1)], ['discr', 'discr', 'discr', 'conti']))
self.assertEqual(self.op.max_window_length, 20)
self.assertRaises(AssertionError, self.op.set_parameter, stg_id='t-1', pars=(1, 2))
self.assertRaises(AssertionError, self.op.set_parameter, stg_id='t1', pars=(1, 2))
self.assertRaises(AssertionError, self.op.set_parameter, stg_id=32, pars=(1, 2))
self.op.set_blender('selecting', '0 and 1 or 2')
self.op.set_blender('ls', 'str-1.2')
self.assertEqual(self.op.ls_blender, 'str-1.2')
self.assertEqual(self.op.selecting_blender, '0 and 1 or 2')
self.assertEqual(self.op.selecting_blender_expr, ['or', 'and', '0', '1', '2'])
self.assertEqual(self.op.ricon_blender, 'add')
self.assertRaises(ValueError, self.op.set_blender, 'select', '0and1')
self.assertRaises(TypeError, self.op.set_blender, 35, '0 and 1')
self.assertEqual(self.op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (0, 1)], ['discr', 'discr', 'discr', 'conti']))
self.assertEqual(self.op.opt_types, [1, 1, 0])
def test_exp_to_blender(self):
self.op.set_blender('selecting', '0 and 1 or 2')
self.assertEqual(self.op.selecting_blender_expr, ['or', 'and', '0', '1', '2'])
self.op.set_blender('selecting', '0 and ( 1 or 2 )')
self.assertEqual(self.op.selecting_blender_expr, ['and', '0', 'or', '1', '2'])
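# The two assertions above indicate that the selecting blender string is parsed into prefix
# (Polish) notation with 'and' binding tighter than 'or'; the ValueError below is apparently
# raised because the parentheses are not surrounded by spaces.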
self.assertRaises(ValueError, self.op.set_blender, 'selecting', '0 and (1 or 2)')
def test_set_opt_par(self):
self.op.set_parameter('t-0',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
self.op.set_parameter(stg_id='s-0',
pars=(0.5,),
opt_tag=0,
sample_freq='10d',
window_length=10,
data_types='close, open')
self.op.set_parameter(stg_id='r-0',
pars=(9, -0.23),
opt_tag=1,
sample_freq='d',
window_length=20,
data_types='close, open')
self.assertEqual(self.op.timing[0].pars, (5, 10, 5))
self.assertEqual(self.op.selecting[0].pars, (0.5,))
self.assertEqual(self.op.ricon[0].pars, (9, -0.23))
self.assertEqual(self.op.opt_types, [1, 0, 1])
self.op.set_opt_par((5, 12, 9, 8, -0.1))
self.assertEqual(self.op.timing[0].pars, (5, 12, 9))
self.assertEqual(self.op.selecting[0].pars, (0.5,))
self.assertEqual(self.op.ricon[0].pars, (8, -0.1))
# test set_opt_par when opt_tag is set to be 2 (enumerate type of parameters)
self.assertRaises(ValueError, self.op.set_opt_par, (5, 12, 9, 8))
def test_stg_attribute_get_and_set(self):
self.stg = qt.TimingCrossline()
self.stg_type = 'TIMING'
self.stg_name = "CROSSLINE STRATEGY"
self.stg_text = 'Moving average crossline strategy, determine long/short position according to the cross ' \
'point' \
' of long and short term moving average prices '
self.pars = (35, 120, 10, 'buy')
self.par_boes = [(10, 250), (10, 250), (1, 100), ('buy', 'sell', 'none')]
self.par_count = 4
self.par_types = ['discr', 'discr', 'conti', 'enum']
self.opt_tag = 0
self.data_types = ['close']
self.data_freq = 'd'
self.sample_freq = 'd'
self.window_length = 270
self.assertEqual(self.stg.stg_type, self.stg_type)
self.assertEqual(self.stg.stg_name, self.stg_name)
self.assertEqual(self.stg.stg_text, self.stg_text)
self.assertEqual(self.stg.pars, self.pars)
self.assertEqual(self.stg.par_types, self.par_types)
self.assertEqual(self.stg.par_boes, self.par_boes)
self.assertEqual(self.stg.par_count, self.par_count)
self.assertEqual(self.stg.opt_tag, self.opt_tag)
self.assertEqual(self.stg.data_freq, self.data_freq)
self.assertEqual(self.stg.sample_freq, self.sample_freq)
self.assertEqual(self.stg.data_types, self.data_types)
self.assertEqual(self.stg.window_length, self.window_length)
self.stg.stg_name = 'NEW NAME'
self.stg.stg_text = 'NEW TEXT'
self.assertEqual(self.stg.stg_name, 'NEW NAME')
self.assertEqual(self.stg.stg_text, 'NEW TEXT')
self.stg.pars = (1, 2, 3, 4)
self.assertEqual(self.stg.pars, (1, 2, 3, 4))
self.stg.par_count = 3
self.assertEqual(self.stg.par_count, 3)
self.stg.par_boes = [(1, 10), (1, 10), (1, 10), (1, 10)]
self.assertEqual(self.stg.par_boes, [(1, 10), (1, 10), (1, 10), (1, 10)])
self.stg.par_types = ['conti', 'conti', 'discr', 'enum']
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'enum'])
self.stg.par_types = 'conti, conti, discr, conti'
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'conti'])
self.stg.data_types = 'close, open'
self.assertEqual(self.stg.data_types, ['close', 'open'])
self.stg.data_types = ['close', 'high', 'low']
self.assertEqual(self.stg.data_types, ['close', 'high', 'low'])
self.stg.data_freq = 'w'
self.assertEqual(self.stg.data_freq, 'w')
self.stg.window_length = 300
self.assertEqual(self.stg.window_length, 300)
def test_rolling_timing(self):
stg = TestLSStrategy()
stg_pars = {'000100': (5, 10),
'000200': (5, 10),
'000300': (5, 6)}
stg.set_pars(stg_pars)
history_data = self.hp1.values
output = stg.generate(hist_data=history_data)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
lsmask = np.array([[0., 0., 1.],
[0., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 0.],
[1., 1., 0.],
[1., 1., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.]])
# TODO: Issue to be solved: the np.nan values are converted to 0 in lsmask, which may have unintended consequences
# TODO: the handling of nan values needs to be resolved
self.assertEqual(output.shape, lsmask.shape)
self.assertTrue(np.allclose(output, lsmask, equal_nan=True))
def test_sel_timing(self):
stg = TestSelStrategy()
stg_pars = ()
stg.set_pars(stg_pars)
history_data = self.hp1['high, low, close', :, :]
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
def test_simple_timing(self):
stg = TestSigStrategy()
stg_pars = (0.2, 0.02, -0.02)
stg.set_pars(stg_pars)
history_data = self.hp1['close, open, high, low', :, 3:50]
output = stg.generate(hist_data=history_data, shares=self.shares, dates=self.date_indices)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
sigmatrix = np.array([[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0]])
side_by_side_array = np.array([[i, out_line, sig_line]
for
i, out_line, sig_line
in zip(range(len(output)), output, sigmatrix)])
print(f'output and signal matrix lined up side by side is \n'
f'{side_by_side_array}')
self.assertEqual(sigmatrix.shape, output.shape)
self.assertTrue(np.allclose(output, sigmatrix))
def test_sel_finance(self):
"""Test selecting_finance strategy, test all built-in strategy parameters"""
stg = SelectingFinanceIndicator()
stg_pars = (False, 'even', 'greater', 0, 0, 0.67)
stg.set_pars(stg_pars)
stg.window_length = 5
stg.data_freq = 'd'
stg.sample_freq = '10d'
stg.sort_ascending = False
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg._poq = 0.67
history_data = self.hp2.values
print(f'Start to test financial selection parameter {stg_pars}')
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get the minimum factor
stg_pars = (True, 'even', 'less', 1, 1, 0.67)
stg.sort_ascending = True
stg.condition = 'less'
stg.lbound = 1
stg.ubound = 1
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor in linear weight
stg_pars = (False, 'linear', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'linear'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get the max factor with proportional weighting
stg_pars = (False, 'proportion', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'proportion'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
# test single factor, get the max factor with even weighting and a threshold of 0.2
stg_pars = (False, 'even', 'greater', 0.2, 0.2, 0.67)
stg.sort_ascending = False
stg.weighting = 'even'
stg.condition = 'greater'
stg.lbound = 0.2
stg.ubound = 0.2
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
class TestLog(unittest.TestCase):
def test_init(self):
pass
class TestConfig(unittest.TestCase):
"""测试Config对象以及QT_CONFIG变量的设置和获取值"""
def test_init(self):
pass
def test_invest(self):
pass
def test_pars_string_to_type(self):
_parse_string_kwargs('000300', 'asset_pool', _valid_qt_kwargs())
class TestHistoryPanel(unittest.TestCase):
def setUp(self):
print('start testing HistoryPanel object\n')
self.data = np.random.randint(10, size=(5, 10, 4))
self.index = pd.date_range(start='20200101', freq='d', periods=10)
self.index2 = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14']
self.index3 = '2016-07-01, 2016-07-04, 2016-07-05, 2016-07-06, 2016-07-07, ' \
'2016-07-08, 2016-07-11, 2016-07-12, 2016-07-13, 2016-07-14'
self.shares = '000100,000101,000102,000103,000104'
self.htypes = 'close,open,high,low'
self.data2 = np.random.randint(10, size=(10, 5))
self.data3 = np.random.randint(10, size=(10, 4))
self.data4 = np.random.randint(10, size=(10))
self.hp = qt.HistoryPanel(values=self.data, levels=self.shares, columns=self.htypes, rows=self.index)
self.hp2 = qt.HistoryPanel(values=self.data2, levels=self.shares, columns='close', rows=self.index)
self.hp3 = qt.HistoryPanel(values=self.data3, levels='000100', columns=self.htypes, rows=self.index2)
self.hp4 = qt.HistoryPanel(values=self.data4, levels='000100', columns='close', rows=self.index3)
self.hp5 = qt.HistoryPanel(values=self.data)
self.hp6 = qt.HistoryPanel(values=self.data, levels=self.shares, rows=self.index3)
def test_properties(self):
""" test all properties of HistoryPanel
"""
self.assertFalse(self.hp.is_empty)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.shape, (5, 10, 4))
self.assertSequenceEqual(self.hp.htypes, ['close', 'open', 'high', 'low'])
self.assertSequenceEqual(self.hp.shares, ['000100', '000101', '000102', '000103', '000104'])
self.assertSequenceEqual(list(self.hp.hdates), list(self.index))
self.assertDictEqual(self.hp.columns, {'close': 0, 'open': 1, 'high': 2, 'low': 3})
self.assertDictEqual(self.hp.levels, {'000100': 0, '000101': 1, '000102': 2, '000103': 3, '000104': 4})
row_dict = {Timestamp('2020-01-01 00:00:00', freq='D'): 0,
Timestamp('2020-01-02 00:00:00', freq='D'): 1,
Timestamp('2020-01-03 00:00:00', freq='D'): 2,
Timestamp('2020-01-04 00:00:00', freq='D'): 3,
Timestamp('2020-01-05 00:00:00', freq='D'): 4,
Timestamp('2020-01-06 00:00:00', freq='D'): 5,
Timestamp('2020-01-07 00:00:00', freq='D'): 6,
Timestamp('2020-01-08 00:00:00', freq='D'): 7,
Timestamp('2020-01-09 00:00:00', freq='D'): 8,
Timestamp('2020-01-10 00:00:00', freq='D'): 9}
self.assertDictEqual(self.hp.rows, row_dict)
def test_len(self):
""" test the function len(HistoryPanel)
:return:
"""
self.assertEqual(len(self.hp), 10)
def test_empty_history_panel(self):
"""测试空HP或者特殊HP如维度标签为纯数字的HP"""
test_hp = qt.HistoryPanel(self.data)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
self.assertEqual(test_hp.level_count, 5)
self.assertEqual(test_hp.row_count, 10)
self.assertEqual(test_hp.column_count, 4)
self.assertEqual(test_hp.shares, list(range(5)))
self.assertEqual(test_hp.hdates, list(pd.date_range(start='20200730', periods=10, freq='d')))
self.assertEqual(test_hp.htypes, list(range(4)))
self.assertTrue(np.allclose(test_hp.values, self.data))
print(f'shares: {test_hp.shares}\nhtypes: {test_hp.htypes}')
print(test_hp)
empty_hp = qt.HistoryPanel()
self.assertTrue(empty_hp.is_empty)
self.assertIsInstance(empty_hp, qt.HistoryPanel)
self.assertEqual(empty_hp.shape[0], 0)
self.assertEqual(empty_hp.shape[1], 0)
self.assertEqual(empty_hp.shape[2], 0)
self.assertEqual(empty_hp.level_count, 0)
self.assertEqual(empty_hp.row_count, 0)
self.assertEqual(empty_hp.column_count, 0)
def test_create_history_panel(self):
""" test the creation of a HistoryPanel object by passing all data explicitly
"""
self.assertIsInstance(self.hp, qt.HistoryPanel)
self.assertEqual(self.hp.shape[0], 5)
self.assertEqual(self.hp.shape[1], 10)
self.assertEqual(self.hp.shape[2], 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(list(self.hp.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp2, qt.HistoryPanel)
self.assertEqual(self.hp2.shape[0], 5)
self.assertEqual(self.hp2.shape[1], 10)
self.assertEqual(self.hp2.shape[2], 1)
self.assertEqual(self.hp2.level_count, 5)
self.assertEqual(self.hp2.row_count, 10)
self.assertEqual(self.hp2.column_count, 1)
self.assertEqual(list(self.hp2.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp2.columns.keys()), ['close'])
self.assertEqual(list(self.hp2.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp3, qt.HistoryPanel)
self.assertEqual(self.hp3.shape[0], 1)
self.assertEqual(self.hp3.shape[1], 10)
self.assertEqual(self.hp3.shape[2], 4)
self.assertEqual(self.hp3.level_count, 1)
self.assertEqual(self.hp3.row_count, 10)
self.assertEqual(self.hp3.column_count, 4)
self.assertEqual(list(self.hp3.levels.keys()), ['000100'])
self.assertEqual(list(self.hp3.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp3.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.assertIsInstance(self.hp4, qt.HistoryPanel)
self.assertEqual(self.hp4.shape[0], 1)
self.assertEqual(self.hp4.shape[1], 10)
self.assertEqual(self.hp4.shape[2], 1)
self.assertEqual(self.hp4.level_count, 1)
self.assertEqual(self.hp4.row_count, 10)
self.assertEqual(self.hp4.column_count, 1)
self.assertEqual(list(self.hp4.levels.keys()), ['000100'])
self.assertEqual(list(self.hp4.columns.keys()), ['close'])
self.assertEqual(list(self.hp4.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.hp5.info()
self.assertIsInstance(self.hp5, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp5.values, self.data))
self.assertEqual(self.hp5.shape[0], 5)
self.assertEqual(self.hp5.shape[1], 10)
self.assertEqual(self.hp5.shape[2], 4)
self.assertEqual(self.hp5.level_count, 5)
self.assertEqual(self.hp5.row_count, 10)
self.assertEqual(self.hp5.column_count, 4)
self.assertEqual(list(self.hp5.levels.keys()), [0, 1, 2, 3, 4])
self.assertEqual(list(self.hp5.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp5.rows.keys())[0], pd.Timestamp('2020-07-30'))
self.hp6.info()
self.assertIsInstance(self.hp6, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp6.values, self.data))
self.assertEqual(self.hp6.shape[0], 5)
self.assertEqual(self.hp6.shape[1], 10)
self.assertEqual(self.hp6.shape[2], 4)
self.assertEqual(self.hp6.level_count, 5)
self.assertEqual(self.hp6.row_count, 10)
self.assertEqual(self.hp6.column_count, 4)
self.assertEqual(list(self.hp6.levels.keys()), ['000100', '000101', '000102', '000103', '000104'])
self.assertEqual(list(self.hp6.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp6.rows.keys())[0], pd.Timestamp('2016-07-01'))
# Error testing during HistoryPanel creation
# shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data,
levels=self.shares, columns='close', rows=self.index)
# values is not an np.ndarray
self.assertRaises(AssertionError,
qt.HistoryPanel,
list(self.data))
# dimension/shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data2,
levels='000100', columns=self.htypes, rows=self.index)
# values have more than 3 dimensions
self.assertRaises(AssertionError,
qt.HistoryPanel,
np.random.randint(10, size=(5, 10, 4, 2)))
# label values are not valid
self.assertRaises(ValueError,
qt.HistoryPanel,
self.data2,
levels=self.shares, columns='close',
rows='a,b,c,d,e,f,g,h,i,j')
def test_history_panel_slicing(self):
"""测试HistoryPanel的各种切片方法
包括通过标签名称切片,通过数字切片,通过逗号分隔的标签名称切片,通过冒号分隔的标签名称切片等切片方式"""
self.assertTrue(np.allclose(self.hp['close'], self.data[:, :, 0:1]))
self.assertTrue(np.allclose(self.hp['close,open'], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp[['close', 'open']], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp['close:high'], self.data[:, :, 0:3]))
self.assertTrue(np.allclose(self.hp['close,high'], self.data[:, :, [0, 2]]))
self.assertTrue(np.allclose(self.hp[:, '000100'], self.data[0:1, :, ]))
self.assertTrue(np.allclose(self.hp[:, '000100,000101'], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, ['000100', '000101']], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, '000100:000102'], self.data[0:3, :]))
self.assertTrue(np.allclose(self.hp[:, '000100,000102'], self.data[[0, 2], :]))
self.assertTrue(np.allclose(self.hp['close,open', '000100,000102'], self.data[[0, 2], :, 0:2]))
print('start testing HistoryPanel')
data = np.random.randint(10, size=(10, 5))
# index = pd.date_range(start='20200101', freq='d', periods=10)
shares = '000100,000101,000102,000103,000104'
dtypes = 'close'
df = pd.DataFrame(data)
print('=========================\nTesting HistoryPanel creation from DataFrame')
hp = qt.dataframe_to_hp(df=df, shares=shares, htypes=dtypes)
hp.info()
hp = qt.dataframe_to_hp(df=df, shares='000100', htypes='close, open, high, low, middle', column_type='htypes')
hp.info()
print('=========================\nTesting HistoryPanel creation from initialization')
data = np.random.randint(10, size=(5, 10, 4)).astype('float')
index = pd.date_range(start='20200101', freq='d', periods=10)
dtypes = 'close, open, high,low'
data[0, [5, 6, 9], [0, 1, 3]] = np.nan
data[1:4, [4, 7, 6, 2], [1, 1, 3, 0]] = np.nan
data[4:5, [2, 9, 1, 2], [0, 3, 2, 1]] = np.nan
hp = qt.HistoryPanel(data, levels=shares, columns=dtypes, rows=index)
hp.info()
print('==========================\noutput all history data of the close type\n')
self.assertTrue(np.allclose(hp['close', :, :], data[:, :, 0:1], equal_nan=True))
print(f'==========================\noutput all history data of the close and open types\n')
self.assertTrue(np.allclose(hp[[0, 1], :, :], data[:, :, 0:2], equal_nan=True))
print(f'==========================\noutput history data of all types for the first share\n')
self.assertTrue(np.allclose(hp[:, [0], :], data[0:1, :, :], equal_nan=True))
print('==========================\noutput all history data of all shares for htypes 0, 1 and 2\n')
self.assertTrue(np.allclose(hp[[0, 1, 2]], data[:, :, 0:3], equal_nan=True))
print('==========================\noutput all history data of the close and high types\n')
self.assertTrue(np.allclose(hp[['close', 'high']], data[:, :, [0, 2]], equal_nan=True))
print('==========================\noutput all history data of htypes 0 and 1\n')
self.assertTrue(np.allclose(hp[[0, 1]], data[:, :, 0:2], equal_nan=True))
print('==========================\noutput all history data of the close and high types\n')
self.assertTrue(np.allclose(hp['close,high'], data[:, :, [0, 2]], equal_nan=True))
print('==========================\noutput all history data of the three types from close through high\n')
self.assertTrue(np.allclose(hp['close:high'], data[:, :, 0:3], equal_nan=True))
print('==========================\noutput all history data of shares 0, 1 and 3\n')
self.assertTrue(np.allclose(hp[:, [0, 1, 3]], data[[0, 1, 3], :, :], equal_nan=True))
print('==========================\noutput all history data of shares 000100 and 000102\n')
self.assertTrue(np.allclose(hp[:, ['000100', '000102']], data[[0, 2], :, :], equal_nan=True))
print('==========================\noutput history data of shares 0, 1 and 2\n', hp[:, 0: 3])
self.assertTrue(np.allclose(hp[:, 0: 3], data[0:3, :, :], equal_nan=True))
print('==========================\noutput all history data of shares 000100 and 000102\n')
self.assertTrue(np.allclose(hp[:, '000100, 000102'], data[[0, 2], :, :], equal_nan=True))
print('==========================\noutput history data of days 0-7 for all shares\n')
self.assertTrue(np.allclose(hp[:, :, 0:8], data[:, 0:8, :], equal_nan=True))
print('==========================\noutput history data of days 0-7 for share 000100\n')
self.assertTrue(np.allclose(hp[:, '000100', 0:8], data[0, 0:8, :], equal_nan=True))
print('==========================\nstart testing multi-axis slicing of HistoryPanel object')
print('==========================\noutput close and open data of shares 000100 and 000102\n',
hp['close,open', ['000100', '000102']])
print('==========================\noutput close and open data of shares 000100 and 000102\n',
hp['close,open', '000100, 000102'])
print(f'historyPanel: hp:\n{hp}')
print(f'data is:\n{data}')
hp.htypes = 'open,high,low,close'
hp.info()
hp.shares = ['000300', '600227', '600222', '000123', '000129']
hp.info()
def test_relabel(self):
new_shares_list = ['000001', '000002', '000003', '000004', '000005']
new_shares_str = '000001, 000002, 000003, 000004, 000005'
new_htypes_list = ['close', 'volume', 'value', 'exchange']
new_htypes_str = 'close, volume, value, exchange'
temp_hp = self.hp.copy()
temp_hp.re_label(shares=new_shares_list)
print(temp_hp.info())
print(temp_hp.htypes)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.htypes, temp_hp.htypes)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.shares, new_shares_list)
temp_hp = self.hp.copy()
temp_hp.re_label(shares=new_shares_str)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.htypes, temp_hp.htypes)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.shares, new_shares_list)
temp_hp = self.hp.copy()
temp_hp.re_label(htypes=new_htypes_list)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
import numpy as np
from __context__ import src
from src import mathutils as mu
def estimateHomographies(allDetections: list):
"""
Input:
allDetections -- list of tuples (one for each view).
Each tuple is (Xa, Xb), a set of sensor points
and model points respectively
Output:
Hs -- list of homographies, one for each view
"""
Hs = []
for Xa, Xb in allDetections:
H = estimateHomography(Xa[:,:2], Xb[:,:2])
Hs.append(H)
return Hs
def estimateHomography(Xa: np.ndarray, Xb: np.ndarray):
"""
Estimate homography using DLT
Inputs:
Xa -- 2D points in sensor
Xb -- 2D model points
Output:
aHb -- homography matrix which relates the model plane (points Xb)
to the sensor plane (points Xa)
Rearrange into the formulation:
M * h = 0
M represents the model and sensor point correspondences
h is a vector representation of the homography aHb we are trying to find:
h = (h11, h12, h13, h21, h22, h23, h31, h32, h33).T
Prior to constructing M, the points Xa and Xb need to be 'normalized'
so that the results of SVD are more well behaved.
"""
mu.validateShape(Xa.shape, (None, 2))
mu.validateShape(Xb.shape, (None, 2))
Na = computeNormalizationMatrix(Xa)
Nb = computeNormalizationMatrix(Xb)
N = Xa.shape[0]
M = np.zeros((2*N, 9))
for i in range(N):
ui, vi = mu.unhom(Na @ mu.hom(Xa[i]))
Xi, Yi = mu.unhom(Nb @ mu.hom(Xb[i]))
M[2*i,:] = (-Xi, -Yi, -1, 0, 0, 0, ui * Xi, ui * Yi, ui)
M[2*i+1,:] = ( 0, 0, 0, -Xi, -Yi, -1, vi * Xi, vi * Yi, vi)
U, S, V_T = np.linalg.svd(M)
h = V_T[-1]
Hp = h.reshape(3,3)
H = np.linalg.inv(Na) @ Hp @ Nb
H /= H[2,2]
return H
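# --- Example (illustration only, not part of the original module): a quick
# synthetic check of estimateHomography. Points on the model plane are mapped
# through a known homography, and the estimate should match it up to scale.
# The helper name _exampleEstimateHomography is hypothetical.
def _exampleEstimateHomography():
    rng = np.random.default_rng(0)
    trueH = np.array([
        [1.2, 0.1, 5.0],
        [-0.05, 0.9, -3.0],
        [1e-4, 2e-4, 1.0],
    ])
    Xb = rng.uniform(-1, 1, (20, 2))            # model plane points
    Xbh = np.hstack([Xb, np.ones((20, 1))])     # homogeneous coordinates
    Xah = (trueH @ Xbh.T).T
    Xa = Xah[:, :2] / Xah[:, 2:3]               # projected sensor points
    H = estimateHomography(Xa, Xb)
    return np.allclose(H, trueH, atol=1e-6)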
def computeNormalizationMatrix(X):
"""
Compute a matrix M which maps a set of points X to their 'normalized'
form Xnorm, i.e.
Xnorm = unhom(M * hom(X))
where the mean Euclidean distance of the points in Xnorm is sqrt(2)
and the centroid of the points is the origin.
For more on why this is necessary, see 'Multiple View Geometry in Computer Vision,
2nd edition', Hartley and Zisserman, §4.4.4, pg 108.
"""
Xmean = np.mean(X, axis=0)
M1 = np.array([
[1, 0, -Xmean[0]],
[0, 1, -Xmean[1]],
[0, 0, 1],
])
Xshifted = X - Xmean
Xmagnitudes = np.linalg.norm(Xshifted, axis=1)
meanMagnitude = np.mean(Xmagnitudes)
scaleFactor = np.sqrt(2) / meanMagnitude
M2 = np.array([
[scaleFactor, 0, 0],
[0, scaleFactor, 0],
[0, 0, 1],
])
M = M2 @ M1
return M
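# --- Example (illustration only, not part of the original module): verify the
# two properties promised above -- centroid at the origin and mean distance of
# sqrt(2) -- on random pixel coordinates. The helper name is hypothetical.
def _exampleNormalizationProperties():
    rng = np.random.default_rng(1)
    X = rng.uniform(0, 640, (50, 2))
    M = computeNormalizationMatrix(X)
    Xh = np.hstack([X, np.ones((X.shape[0], 1))])
    Xn = (M @ Xh.T).T[:, :2]                    # last row of M is (0, 0, 1), so w stays 1
    centroidAtOrigin = np.allclose(np.mean(Xn, axis=0), 0.0)
    meanDistanceIsSqrt2 = np.isclose(np.mean(np.linalg.norm(Xn, axis=1)), np.sqrt(2))
    return centroidAtOrigin and meanDistanceIsSqrt2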
def computeIntrinsicMatrix(Hs: list):
"""
Compute the intrinsic matrix from a list of homographies
Inputs:
Hs -- list of homographies
Output:
A -- intrinsic camera matrix
From the Burger paper, use equations 96 - 98 to solve for vector b = (B0, B1, B2, B3, B4, B5)^T
and then compute the intrinsic matrix and return.
Notes:
H = [h0 h1 h2] = lambda * A * [r0 r1 t]
By leveraging that r0 and r1 are orthonormal, we get:
h0^T * (A^-1)^T * A^-1 * h1 = 0
h0^T * (A^-1)^T * A^-1 * h0 = h1^T * (A^-1)^T * A^-1 * h1
B = (A^-1)^T * A^-1, where B = [B0 B1 B3]
[B1 B2 B4]
[B3 B4 B5]
simplifying:
h0^T * B * h1 = 0
h0^T * B * h0 - h1^T * B * h1 = 0
letting b = (B0, B1, B2, B3, B4, B5)^T
we reformulate the h^T * B * h form:
hp^T * B * hq = vecpq(H) * b
with vec(H, p, q) = (
H0p * H0q,
H0p * H1q + H1p * H0q,
H1p * H1q,
H2p * H0q + H0p * H2q,
H2p * H1q + H1p * H2q,
H2p * H2q,
)
so we can rewrite our system of equations for a single homography as:
[ vec(H, 0, 1) ] * b = 0
[vec(H, 0, 0) - vec(H, 1, 1) ]
Now we can stack these terms in the left matrix for each homography to create
a matrix V of size (2*N, 6) and then solve for b with SVD.
V * b = 0
"""
N = len(Hs)
V = np.zeros((2*N, 6))
for i in range(N):
H = Hs[i]
V[2*i,:] = vecHomography(H, 0, 1)
V[2*i+1,:] = vecHomography(H, 0, 0) - vecHomography(H, 1, 1)
U, S, V_T = np.linalg.svd(V)
b = tuple(V_T[-1])
A = computeIntrinsicMatrixFrombCholesky(b)
if np.sum(np.isnan(A)) > 0:
raise ValueError(f"Computed intrinsic matrix contains NaN: \n{A}")
return A
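# --- Illustration only: the computeIntrinsicMatrixFrombCholesky helper called
# above is not shown in this excerpt. The sketch below is an assumption about
# how such a helper can work (following Burger / Zhang): since B = (A^-1)^T * A^-1
# up to scale, a Cholesky factor of B is proportional to (A^-1)^T.
def _exampleIntrinsicFromBCholesky(b):
    B0, B1, B2, B3, B4, B5 = b
    B = np.array([
        [B0, B1, B3],
        [B1, B2, B4],
        [B3, B4, B5],
    ])
    if B[0, 0] < 0:
        B = -B                          # b from SVD is only determined up to sign
    L = np.linalg.cholesky(B)           # B = L @ L.T with L lower triangular
    A = np.linalg.inv(L.T)              # L.T is proportional to A^-1
    A /= A[2, 2]                        # fix the scale so that A[2, 2] == 1
    return A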
def vecHomography(H: np.ndarray, p: int, q: int):
"""
Input:
H -- 3x3 homography matrix
p -- first column index
q -- second column index
Output:
v -- 1x6 vector containing components of H based on the
indices p and q which represent columns of the homography H
Notes:
This layout of v allows the homogeneous constraint hp^T * B * hq to be
written as the dot product vec(H, p, q) . b, where b collects the six
unique entries of the symmetric matrix B built from the intrinsic parameters.
Implements equation 96 of Burger
"""
values = (
H[0,p] * H[0,q],
H[0,p] * H[1,q] + H[1,p] * H[0,q],
H[1,p] * H[1,q],
H[2,p] * H[0,q] + H[0,p] * H[2,q],
H[2,p] * H[1,q] + H[1,p] * H[2,q],
H[2,p] * H[2,q],
)
v = np.array(values)
return v
'''
Transfer learning on satellite imagery using the crfasrnn
by <NAME>
'''
import tensorflow as tf
import os
import keras.backend as K
def assignGPU(gpu):
os.environ["CUDA_VISIBLE_DEVICES"]="%s" % (gpu)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
K.set_session(tf.Session(config=config))
assignGPU(2)
import sys
sys.path.insert(1, './src')
from keras.models import Model
from keras import optimizers
from keras.layers import Conv2D, MaxPooling2D, Input, ZeroPadding2D, \
Dropout, Conv2DTranspose, Cropping2D, Add
from crfrnn_layer import CrfRnnLayer
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
import h5py
import util
from PIL import Image
IMAGE_PATH = './images/'
MASKS_PATH = './masks/'
image_ids=[]
mask_ids=[]
channels, height, width = 3, 100, 100
for root,dirs,image_ids in os.walk(IMAGE_PATH):
print(image_ids)
for rroot,rdirs,mask_ids in os.walk(MASKS_PATH):
print(mask_ids)
image_ids.sort()
mask_ids.sort()
X = np.zeros((len(mask_ids), height, width,3), dtype=np.float32)
for i in range(len(image_ids)):
X[i],imgh,imgw=util.get_preprocessed_image(IMAGE_PATH+image_ids[i])
Y = np.zeros((len(mask_ids), height, width,5), dtype=np.int32)
labelpattern=[[255,255,255,255], #white, background
[0,128,0,255], #irrigate 01
[0,0,255,255], #non-irrigate
[0,0,0,255], #irrigate 02
[255,192,203,255]] #irrigate 03
for i in range(len(mask_ids)):
im = np.array(Image.open(MASKS_PATH+mask_ids[i])).astype(np.int32)
for j in range(len(labelpattern)):
temp=im-labelpattern[j]
temp=np.sum(temp,axis=2)
b=np.where(temp==0)
temparray = np.zeros(5)
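# --- Illustration (an assumption, not code from the original script): the same
# colour-matching idea as the loop above, packaged as a helper that turns one
# RGBA mask into a one-hot label volume for the classes in labelpattern.
def rgba_mask_to_one_hot(mask, patterns):
    # mask: (H, W, 4) integer RGBA image; patterns: list of RGBA class colours
    one_hot = np.zeros(mask.shape[:2] + (len(patterns),), dtype=np.int32)
    for class_idx, colour in enumerate(patterns):
        matches = np.all(mask == np.array(colour), axis=-1)   # pixels of this class
        one_hot[matches, class_idx] = 1
    return one_hot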
# Creating Numpy Arrays
import numpy as np
# Converting from a list
# Let's start with a list
my_list1 = [1, 2, 3, 4]
my_array1 = np.array(my_list1)
# Make another list
my_list2 = [11, 22, 33, 44]
# Make a list of lists
my_lists = [my_list1, my_list2]
# Make multi-dimensional array
my_array2 = np.array(my_lists)
# Let's get the shape of the array
my_array2.shape
# Find out the data type of the array
my_array2.dtype
# Making special case arrays
# Zeros
np.zeros(5)
# Ones
np.ones((5, 5))
# An empty array
np.empty(5)
np.empty((3, 4))
# Identity array
np.eye(5)
# Using a range
np.arange(5)
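# arange also accepts start, stop and step arguments, like Python's built-in range
np.arange(5, 50, 2)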
import numpy as np
# Box_1 and box_2 have different center points, and their last dimension is 4 (x, y, w, h).
class IOUDifferentXY():
def __init__(self, box_1, box_2):
super(IOUDifferentXY, self).__init__()
self.box_1_min, self.box_1_max = IOUDifferentXY.__get_box_min_and_max(box_1)
self.box_2_min, self.box_2_max = IOUDifferentXY.__get_box_min_and_max(box_2)
self.box_1_area = IOUDifferentXY.__get_box_area(box_1)
self.box_2_area = IOUDifferentXY.__get_box_area(box_2)
@staticmethod
def __get_box_min_and_max(box):
box_xy = box[..., 0:2]
box_wh = box[..., 2:4]
box_min = box_xy - box_wh / 2
box_max = box_xy + box_wh / 2
return box_min, box_max
@staticmethod
def __get_box_area(box):
return box[..., 2] * box[..., 3]
def calculate_iou(self):
intersect_min = np.maximum(self.box_1_min, self.box_2_min)
intersect_max = np.minimum(self.box_1_max, self.box_2_max)
# the rest follows the standard IoU definition (assumed completion)
intersect_wh = np.maximum(intersect_max - intersect_min, 0.0)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
union_area = self.box_1_area + self.box_2_area - intersect_area
return intersect_area / union_area
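# --- Example (illustration only, not part of the original file): IoU of two
# axis-aligned boxes given as (x_center, y_center, w, h); the numbers are made up.
if __name__ == "__main__":
    box_a = np.array([5.0, 5.0, 4.0, 4.0])      # spans 3..7 in x and y
    box_b = np.array([6.0, 6.0, 4.0, 4.0])      # spans 4..8 in x and y
    # intersection 3 * 3 = 9, union 16 + 16 - 9 = 23, so IoU = 9 / 23 ≈ 0.391
    print(IOUDifferentXY(box_a, box_b).calculate_iou())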
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
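# Example (illustration only, not part of the generated tables): for the
# centrosymmetric group P -1 defined above, each reflection is paired with its
# Friedel mate and a unit phase factor:
#
#     sg = space_groups['P -1']
#     hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
#     # hkls   -> [[ 1,  2,  3], [-1, -2, -3]]
#     # phases -> [1.+0.j, 1.+0.j]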
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
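# Space group 44: I m m 2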
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
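# Space group 45: I b a 2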
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
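# Space group 46: I m a 2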
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
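# Space group 47: P m m m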
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
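# Space group 48: P n n n :2 (origin choice 2)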
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
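# Space group 49: P c c m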
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
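# Space group 50: P b a n :2 (origin choice 2)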
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
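# Space group 51: P m m a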
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
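# Space group 52: P n n a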
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
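# Space group 53: P m n a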
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
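# Space group 54: P c c a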
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
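# Space group 55: P b a m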
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
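# Space group 56: P c c n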
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
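# Space group 57: P b c m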
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
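# Space group 58: P n n m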
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
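# Space group 59: P m m n :2 (origin choice 2)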
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
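# Space group 60: P b c n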
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
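# Space group 61: P b c a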
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
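# Space group 62: P n m a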
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
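# Space group 63: C m c m (C-centred; the second half of the list repeats the
# operations with the centring translation added, left unreduced modulo 1)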
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
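# Space group 64: C m c a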
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
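# Space group 65: C m m m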
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
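# Space group 66: C c c m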
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
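# Space group 67: C m m a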
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
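# Space group 68: C c c a :2 (origin choice 2)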
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
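# Space group 69: F m m m (F-centred; operations are repeated with each of the
# three face-centring translations added)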
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
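# Space group 70: F d d d :2 (origin choice 2)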
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
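# Space group 71: I m m m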
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
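# Convention note (an assumption about usage, not part of these tables):
# under the standard crystallographic convention, an operation
# (rot, trans_num, trans_den) maps a fractional coordinate x to
#     x' = N.dot(rot, x) + 1.0*trans_num/trans_den    (reduced modulo 1).
# Body-centred settings such as 'I 4' above simply repeat the point-group
# operations with the centring translation (1/2, 1/2, 1/2) added.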
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
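# Naming note: symbols carrying a ':2' suffix (e.g. 'P 4/n :2', 'I 41/a :2')
# refer to the origin-choice-2 setting of the International Tables.  Negative
# numerators such as trans_num = N.array([-1,-1,0]) encode negative fractional
# translations, which are equivalent to their positive counterparts modulo
# the lattice.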
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
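# Body-centred (I) groups such as 'I 4 m m' list every operation twice: once
# with its primitive translation and once with the centring vector
# (1/2, 1/2, 1/2) added to it.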
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
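# The ':2' suffix in symbols such as 'P 4/n b m :2' indicates origin choice 2
# of the Hermann-Mauguin setting for space groups with two conventional origins.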
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
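# The body-centred groups that follow (e.g. I 4/m m m, I 4/m c m) list each
# symmetry operation twice: once with its primitive translation and once with
# the I-centring vector (1/2, 1/2, 1/2) added, which is why their
# transformation lists are twice as long as those of the P groups above.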
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
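# Minimal usage sketch (kept as a comment so that importing this data module
# stays side-effect free; it assumes the SpaceGroup objects built here keep
# their operation list in a 'transformations' attribute, which is an
# assumption about the class defined earlier in this module):
#
#   x = N.array([0.1, 0.2, 0.3])                 # a fractional coordinate
#   sg = space_groups['P 3']
#   images = [N.dot(rot, x) + (1.0 * num) / den  # rotate, then translate
#             for rot, num, den in sg.transformations]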
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
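# The rhombohedral groups below are given in the hexagonal-axes setting
# (':H'); the extra (1/3, 2/3, 2/3) and (2/3, 1/3, 1/3) translations attached
# to every operation are the R-centring vectors of that setting.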
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
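# Minimal usage sketch (illustrative only; the coordinate below is assumed): each
# (rot, trans_num, trans_den) tuple above is conventionally applied to a fractional
# position x as  x' = rot.dot(x) + trans_num / trans_den, where `N` is the module's
# numpy-compatible array package.
#
#   rot, trans_num, trans_den = transformations[0]          # identity operation
#   x = N.array([0.25, 0.50, 0.75])
#   x_new = N.dot(rot, x) + trans_num.astype(float) / trans_den
#   # -> array([0.25, 0.5 , 0.75])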
import copy
import logging
import math
import random
import warnings
from typing import List, Tuple
import numpy as np
import pandas as pd
import torch
logger = logging.getLogger()
EXPL_LENGTHS_TRAIN_20 = [(1, 208),
(2, 299),
(3, 354),
(4, 375),
(5, 298),
(6, 298),
(7, 224),
(8, 160),
(9, 140),
(10, 99),
(11, 61),
(12, 56),
(13, 45),
(14, 22),
(15, 21),
(16, 15),
(17, 13),
(18, 9),
(19, 5),
(20, 3),
(21, 9),
(22, 1)]
FREQS = np.array([c for i, c in EXPL_LENGTHS_TRAIN_20])
EXPL_LENGTH_FREQS = FREQS / sum(FREQS)
def gold_facts_in_n_closest_all(dataset, nearest_k):
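# Greedy coverage check: start from the nearest_k facts closest to the question,
# mark any gold facts that are currently visible as found, then expand the visible
# set with the nearest_k neighbours of every newly found fact, until no new gold
# fact becomes visible.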
results = {}
for i, row in dataset.qa_feats.iterrows():
gold_to_find = set(copy.deepcopy(row.gold_facts))
visible_facts = set(row.closest[:nearest_k])
lens_visible = []
gold_found = set()
while len(gold_to_find) > 0:
lens_visible.append(len(visible_facts))
found = set([fact for fact in gold_to_find if fact in visible_facts])
if len(found) == 0:
break
gold_found = gold_found.union(found)
gold_to_find -= found
for fact in found:
visible_from_fact = set(dataset.fact_feats.iloc[fact].closest[:nearest_k])
visible_facts = visible_facts.union(visible_from_fact)
results[i] = {
'all': set(copy.deepcopy(row.gold_facts)),
'found': gold_found,
'not_found': gold_to_find,
'mean_len_visible': np.mean(lens_visible)
}
return results
def gold_facts_in_n_closest_cur(dataset, nearest_k):
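# Variant of the check above that follows a single chain: after each found gold
# fact, the visible set is replaced by (not merged with) that fact's nearest_k
# neighbours.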
results = {}
for i, row in dataset.qa_feats.iterrows():
gold_to_find = set(copy.deepcopy(row.gold_facts))
visible_facts = set(row.closest[:nearest_k])
lens_visible = []
gold_found = set()
while len(gold_to_find) > 0:
lens_visible.append(len(visible_facts))
facts = [fact for fact in gold_to_find if fact in visible_facts]
if len(facts) == 0:
break
selected = facts[0]
gold_found.add(selected)
gold_to_find -= {selected}
visible_facts = dataset.fact_feats.iloc[selected].closest[:nearest_k]
results[i] = {
'all': set(copy.deepcopy(row.gold_facts)),
'found': gold_found,
'not_found': gold_to_find,
'mean_len_visible': np.mean(lens_visible)
}
return results
def find_nearest_k_for_rate(dataset, target_rate, func=gold_facts_in_n_closest_all, start_at=0):
k = start_at
results = func(dataset, k)
nb_all = sum([len(res['all']) for res in results.values()])
while True:
nb_found = sum([len(res['found']) for res in results.values()])
mean_len_visible = np.mean([res['mean_len_visible'] for res in results.values()])
rate = nb_found / nb_all
if rate > target_rate:
break
k += 10
print('Trying k = %s, rate was %s' % (k, rate))
results = func(dataset, k)
return k, rate, mean_len_visible
def nCr(n, r):
f = math.factorial
return f(n) // f(r) // f(n - r)
def nb_combinations(dataset):
def q_nb_combinations(nb_facts):
return sum([dataset.nCr(nb_facts, i) for i in range(1, nb_facts)])
return sum([q_nb_combinations(len(gf)) for gf in dataset.qa_feats.gold_facts])
def nb_samples(dataset):
combs = [(i, sum([dataset.nCr(i, j) for j in range(0, i + 1)]))
for i in range(1, 23)]
lens = [(i, len([row for _, row in dataset.qa_feats.iterrows() if len(row.gold_facts) == i]))
for i in range(1, 23)]
tot = [(combs[i][0], combs[i][1] * lens[i][1]) for i in range(22)]
cum_tot = np.cumsum([count for _, count in tot])
real_counts = [(i + 1, c + sum(combs[i][1] * lens[j][1] for j in range(i + 1, 22)))
for i, c in enumerate(cum_tot)]
return combs, lens, tot, real_counts
def max_length_of_explanation(dataset):
"""
make sure that this fits in language model (max seq length - max_position_embeddings)
>> 19: (344, 91.55, 21, 734)
"""
max_length = 0
lengths = []
for i, row in dataset.qa_feats.iterrows():
qa_tok = row.tokenized
facts = list(dataset.fact_feats.iloc[list(row.gold_facts)].tokenized)
encoded = qa_tok + [t for fact in facts for t in fact]
length = len(encoded)
if length > max_length:
max_length = length
lengths.append(length)
longest_qa = sorted(list(dataset.qa_feats.tokenized),
key=lambda x: len(x), reverse=True)[0]
max_nb_facts = max([len(gf) for gf in dataset.qa_feats.gold_facts])
longest_facts = sorted(list(dataset.fact_feats.tokenized),
key=lambda x: len(x), reverse=True)[:max_nb_facts]
flattened_longest_facts = [t for fact in longest_facts for t in fact]
return (max_length, sum(lengths) / len(lengths), max_nb_facts,
len(longest_qa) + len(flattened_longest_facts))
POINTWISE_LOSSES = ['xent', 'mse', 'xent-2']
BATCHWISE_LOSSES = ['fisher']
NOISE_CONTRASTIVE_LOSSES: List[str] = ['nce', 'ranking-nce', 'binary-nce']
CONTRASTIVE_LOSSES = ['ranknet', 'lambdaloss', 'margin-pairs'] + NOISE_CONTRASTIVE_LOSSES
def should_recompute_lengths(args, dataset, train, valid, infer):
return (
(train and (not hasattr(dataset, 'max_length_in_batches_single_q')
and not hasattr(dataset, 'max_length_in_batches')))
or args.nearest_k_visible != dataset.nearest_k_visible
or (train and (dataset.batch_size != args.train_batch_size or
dataset.tokens_per_batch != args.train_tokens_per_batch))
or ((valid or infer) and (dataset.batch_size != args.eval_batch_size or
dataset.tokens_per_batch != args.eval_tokens_per_batch))
)
def read_explanations(path: str) -> List[Tuple[str, str]]:
header = []
uid = None
df = pd.read_csv(path, sep='\t', dtype=str)
for name in df.columns:
if name.startswith('[SKIP]'):
if 'UID' in name and not uid:
uid = name
else:
header.append(name)
if not uid or len(df) == 0:
warnings.warn('Possibly misformatted file: ' + path)
return []
return df.apply(lambda r: (r[uid], ' '.join(str(s) for s in list(r[header]) if not pd.isna(s))), 1).tolist()
def read_explanations_wo_fill(path: str) -> List[Tuple[str, str]]:
header = []
uid = None
df = pd.read_csv(path, sep='\t', dtype=str)
for name in df.columns:
if name.startswith('[SKIP]'):
if 'UID' in name and not uid:
uid = name
else:
if not name.strip().startswith('[FILL]'): # this is the difference
header.append(name)
if not uid or len(df) == 0:
warnings.warn('Possibly misformatted file: ' + path)
return []
return df.apply(lambda r: (r[uid], ' '.join(str(s) for s in list(r[header]) if not pd.isna(s))), 1).tolist()
def worker_init_fn(x):
# this is BAD: it would seed every worker with the same fixed seed in every epoch,
# so epochs would not differ (workers get recreated at the start of every epoch)
# seed = x
# random.seed(seed)
# np.random.seed(seed)
# torch.manual_seed(seed)
# # if torch.cuda.device_count() > 0:
# # torch.cuda.manual_seed_all(seed)
#
# rather: (https://discuss.pytorch.org/t/reproducibility-with-all-the-bells-and-whistles/81097)
worker_seed = torch.initial_seed() % 2 ** 32
np.random.seed(worker_seed)
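# Typical hook-up (sketch; `train_dataset`, batch size and worker count are placeholders):
#   loader = torch.utils.data.DataLoader(train_dataset, batch_size=8, num_workers=4,
#                                        worker_init_fn=worker_init_fn)
# Each worker then reseeds numpy from torch.initial_seed(), which PyTorch derives from a
# fresh base seed every time the loader is iterated, so epochs no longer repeat the same
# random draws.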
# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Compresses and pads the weights. It also calculates the scales and packs them with the biases.
import math
from collections import namedtuple
from typing import Tuple
import numpy as np
from .api import NpuBlockTraversal
from .architecture_features import Accelerator
from .architecture_features import ArchitectureFeatures
from .data_type import DataType
from .errors import UnsupportedFeatureError
from .nn_graph import SchedulingStrategy
from .numeric_util import round_up
from .numeric_util import round_up_divide
from .operation import NpuBlockType
from .operation import Op
from .scaling import quantise_scale
from .scaling import reduced_quantise_scale
from .tensor import create_equivalence_id
from .tensor import TensorBlockTraversal
from .tensor import TensorFormat
from .tensor import TensorPurpose
from .tensor import TensorSubPurpose
from ethosu import mlw_codec
# Contains meta info for a weight compression. If two tensors have identical weight compression config,
# then they also will have identical compressed weights.
WeightCompressionConfig = namedtuple(
"WeightCompressionConfig", ["npu_block_type", "ofm_block_depth", "ofm_depth_step", "dilation", "value_id"]
)
def encode_weights(
accelerator: Accelerator,
weights_volume: np.ndarray,
dilation_xy: Tuple[int, int],
ifm_bitdepth: int,
ofm_block_depth: int,
is_depthwise: bool,
block_traversal: NpuBlockTraversal,
):
"""
Internal implementation of the public facing API to use weight encoding.
:param accelerator: architecture_features.Accelerator enum to pick the correct Ethos-U accelerator
:param weights_volume: numpy.ndarray in OHWI layout with a shape of four
:param dilation_xy: a two element tuple of dilation attributes in x,y dimension
:param ifm_bitdepth: the bitdepth of input feature map
:param ofm_block_depth: the depth of blocks for Ethos-U processing
:param is_depthwise: a boolean indicating these weights are used for a depthwise traversal
:param block_traversal: indicates how these weights are traversed on sub-kernel basis
:return: a bytearray of compressed weights
"""
# Check arg types
assert isinstance(accelerator, Accelerator)
assert isinstance(weights_volume, np.ndarray)
assert isinstance(dilation_xy, tuple)
assert isinstance(ifm_bitdepth, int)
assert isinstance(ofm_block_depth, int)
assert isinstance(is_depthwise, bool)
assert isinstance(block_traversal, NpuBlockTraversal)
# Checks for weight layout
assert len(weights_volume.shape) == 4, "weights ndarray should have 4 dimensions"
# It cannot be both partkernel and depthwise
assert not (
is_depthwise and block_traversal == NpuBlockTraversal.PART_KERNEL_FIRST
), "encode_weights :: partkernel and depthwise are mutually exclusive"
# Check valid values for dilation
assert dilation_xy[0] in (1, 2), "encode_weights :: dilation x should be 1 or 2 not {}".format(dilation_xy[0])
assert dilation_xy[1] in (1, 2), "encode_weights :: dilation y should be 1 or 2 not {}".format(dilation_xy[1])
ifm_ublock = ArchitectureFeatures.accelerator_configs[accelerator].ifm_ublock
ofm_ublock = ArchitectureFeatures.accelerator_configs[accelerator].ofm_ublock
raw_stream = generate_brick(
ifm_ublock=ifm_ublock,
ofm_ublock=ofm_ublock,
brick_weights=weights_volume,
ofm_block_depth=ofm_block_depth,
is_depthwise=is_depthwise,
is_partkernel=block_traversal == NpuBlockTraversal.PART_KERNEL_FIRST,
ifm_bitdepth=ifm_bitdepth,
dilation=dilation_xy,
)
encoded_stream = encode(raw_stream)
return encoded_stream
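# Usage sketch (shapes, values and the accelerator member are illustrative only):
#   weights = np.zeros((16, 3, 3, 8), dtype=np.int8)          # OHWI volume
#   stream = encode_weights(
#       accelerator=Accelerator.Ethos_U55_128,                # enum member name assumed
#       weights_volume=weights,
#       dilation_xy=(1, 1),
#       ifm_bitdepth=8,
#       ofm_block_depth=16,
#       is_depthwise=False,
#       block_traversal=NpuBlockTraversal.DEPTH_FIRST,
#   )
# `stream` is the compressed weight stream, padded to a multiple of 16 bytes.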
def encode_bias(bias: np.int64, scale: int, shift: int):
"""
Internal implementation of public facing API to pack bias and scale values as required by the Ethos-U
:param bias: 64bit signed number that includes 40bit signed bias
:param scale: 32bit scale value
:param shift: 6bit shift value
:return: packed 80bit [0(2-bits),shift(6-bits),scale(32-bits),bias(40-bits)]
"""
# Check arg types
assert isinstance(bias, np.int64)
assert isinstance(scale, int)
assert isinstance(shift, int)
assert -(1 << (40 - 1)) <= bias < (1 << (40 - 1)) # signed 40-bit range
assert 0 <= scale < (1 << 32) # unsigned 32-bit range
assert 0 <= shift < (1 << 6) # unsigned 6-bit range
data = bytearray(10)
data[0] = (bias >> (0 * 8)) & 0xFF
data[1] = (bias >> (1 * 8)) & 0xFF
data[2] = (bias >> (2 * 8)) & 0xFF
data[3] = (bias >> (3 * 8)) & 0xFF
data[4] = (bias >> (4 * 8)) & 0xFF
data[5] = (scale >> (0 * 8)) & 0xFF
data[6] = (scale >> (1 * 8)) & 0xFF
data[7] = (scale >> (2 * 8)) & 0xFF
data[8] = (scale >> (3 * 8)) & 0xFF
data[9] = shift & 0x3F
return data
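# Worked example (values chosen arbitrarily): encode_bias(np.int64(1000), 1 << 30, 30)
# returns a 10-byte array laid out little-endian as
#   bytes 0-4 : bias  (40 bits)
#   bytes 5-8 : scale (32 bits)
#   byte  9   : shift (6 bits)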
def create_weight_compression_config(tens, npu_block_type, ofm_block_depth, ofm_depth_step, dilation):
# Note: for an ofm block only its depth is used in weight compression.
# And block depth > ofm depth gives same result as block depth == ofm depth
block_depth = min(ofm_block_depth, tens.quant_values.shape[-1])
return WeightCompressionConfig(npu_block_type, block_depth, ofm_depth_step, dilation, tens.value_id)
def set_storage_shape(tens):
# Sets the storage shape depending on the tensor's sub purpose
if tens.sub_purpose == TensorSubPurpose.DoubleBuffer and len(tens.compressed_values) > 2:
offset = 2 * np.amax([len(x) for x in tens.compressed_values])
assert offset % 16 == 0
else:
offset = tens.weight_compressed_offsets[-1]
tens.storage_shape = [1, 1, 1, offset]
class CompressedWeightCache:
# Contains weight compressions for all weight tensors in a graph
def __init__(self):
self.cache = {} # maps from WeightCompressionConfig to a tensor clone containing compressed weights
def get_tensor_with_same_compression(self, wcc):
return self.cache.get(wcc)
def add(self, tens):
# Adds the compressed weights from the tensor to the cache
wcc = tens.weight_compression_config
# Clone the tensor to make sure that nothing related to the weight compression is modified
tens_clone = tens.clone("_weights{}_{}".format(wcc.ofm_block_depth, wcc.ofm_depth_step))
self.cache[wcc] = tens_clone
def encode(weight_stream):
if len(weight_stream) == 0:
return []
assert np.amin(weight_stream) >= -255
assert np.amax(weight_stream) <= 255
# Encode flattened signed weight stream
compressed = mlw_codec.encode(weight_stream)
# pad with 0xFF as needed so the length of the weight stream
# is a multiple of 16
while (len(compressed) % 16) != 0:
compressed.append(0xFF)
return compressed
def generate_brick(
ifm_ublock, ofm_ublock, brick_weights, ofm_block_depth, is_depthwise, is_partkernel, ifm_bitdepth, dilation
):
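# Streams the OHWI weight volume in the order the hardware consumes it:
# OFM depth blocks -> IFM depth blocks -> sub-kernels (bounded by SubKernelMax and
# dilation) -> IFM/OFM micro-blocks -> kernel elements, appending zeros wherever an
# index runs past the real tensor (padding required by depthwise / part-kernel modes).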
decomp_h = ArchitectureFeatures.SubKernelMax.height // dilation[0]
decomp_w = ArchitectureFeatures.SubKernelMax.width // dilation[1]
# Expect weights formatted OHWI
ofm_depth = brick_weights.shape[-4]
ifm_depth = brick_weights.shape[-1]
kernel_width = brick_weights.shape[-2]
kernel_height = brick_weights.shape[-3]
# IFM block depth
if is_partkernel or (ifm_bitdepth == 16):
# IFM block depth is always 16 for part-kernel-first
ifm_block_depth = 16
elif ifm_bitdepth == 8:
ifm_block_depth = 32
else:
assert False
stream = []
# Top level striping - OFM blocks in the entire brick's depth
for ofm_block_z in range(0, ofm_depth, ofm_block_depth):
clipped_ofm_block_depth = min(ofm_block_depth, ofm_depth - ofm_block_z)
# IFM blocks required for the brick
for ifm_block_z in range(0, (1 if is_depthwise else ifm_depth), ifm_block_depth):
if is_depthwise:
clipped_ifm_block_depth = ifm_ublock.depth
else:
clipped_ifm_block_depth = (
min(ifm_block_depth, ifm_depth - ifm_block_z) if is_partkernel else ifm_block_depth
)
# Weight decomposition
# Subkernel Splitting (H)
for subkernel_y in range(0, kernel_height, decomp_h):
sub_height = min(kernel_height - subkernel_y, decomp_h)
# Subkernel splitting (W)
for subkernel_x in range(0, kernel_width, decomp_w):
sub_width = min(kernel_width - subkernel_x, decomp_w)
subkernel_elements = sub_width * sub_height
# Part kernel first works across the kernel H/W and needs padding
if is_partkernel:
if ifm_bitdepth == 16 and subkernel_elements % 2 != 0:
subkernel_elements = int(math.ceil(subkernel_elements / 2) * 2)
elif ifm_bitdepth == 8 and subkernel_elements % 4 != 0:
subkernel_elements = int(math.ceil(subkernel_elements / 4) * 4)
# Depthwise Conv requires multiple of 4 kernel elements in its weight block
# this is different from normal conv which is considered "weights depth-first"
elif is_depthwise:
subkernel_elements = int(math.ceil(subkernel_elements / 4.0) * 4)
ifm_block_depth_outer = clipped_ifm_block_depth if is_partkernel else 1
ifm_block_depth_inner = 1 if is_partkernel else clipped_ifm_block_depth
# IFM Ublocks in IFM-block over depth for part-kernel-first mode
# For depth-first IFM Ublocks are traversed after subkernel elements so this loop is ignored.
for ifm_ublk_outer in range(0, ifm_block_depth_outer, ifm_ublock.depth):
# OFM Ublocks in OFM-block over depth
for ofm_ublk in range(0, clipped_ofm_block_depth, ofm_ublock.depth):
# HW Kernel element traversal - cannot be a H/W loop due to element
# padding requirement on depthwise/part-kernel configurations
for element in range(subkernel_elements):
kx = element % sub_width
ky = element // sub_width
# IFM Ublocks in IFM-block over depth (only 1 ublock if depthwise)
# In case of part-kernel-first IFM Ublock traversal have already been handled
# and this loop is ignored.
for ifm_ublk_inner in range(0, ifm_block_depth_inner, ifm_ublock.depth):
# Feed OFM ublock elements
for ofm_ublock_z in range(ofm_ublock.depth):
# Source IFM ublock elements (only 1 element deep if depthwise)
for ifm_ublock_z in range(1 if is_depthwise else ifm_ublock.depth):
# Source position within the current subkernel
wx = subkernel_x + kx
wy = subkernel_y + ky
# Source IFM/OFM slices
ifm_ublk = ifm_ublk_inner + ifm_ublk_outer
ifm_z = ifm_block_z + ifm_ublk + ifm_ublock_z
ofm_z = ofm_block_z + ofm_ublk + ofm_ublock_z
if (ifm_z >= ifm_depth) or (ofm_z >= ofm_depth) or (ky >= sub_height):
stream.append(0)
else:
stream.append(brick_weights[ofm_z][wy][wx][ifm_z])
return stream
def core_deinterleave(hwio, core, ncores):
# Put weights back into OHWI
ohwi = np.transpose(hwio, (3, 0, 1, 2))
return ohwi[core : ohwi.shape[0] : ncores]
# Compress the weights
def compress_weights(arch, nng, tens, npu_block_type, ofm_block_depth, ofm_depth_step, dilation):
assert tens.purpose == TensorPurpose.Weights
# Check the weight cache
if nng.weight_cache is None:
nng.weight_cache = CompressedWeightCache()
wcc = create_weight_compression_config(tens, npu_block_type, ofm_block_depth, ofm_depth_step, dilation)
tens.weight_compression_config = wcc
# Reassign equivalence id such that tensors with same weight compression get identical equivalence ids,
# but tensors with the same values but different compression get different equivalence ids
tens.equivalence_id = create_equivalence_id(wcc)
tens_cached = nng.weight_cache.get_tensor_with_same_compression(wcc)
if tens_cached is not None:
# Cache hit, copy weights from the cache
tens.copy_compressed_weight_info(tens_cached)
set_storage_shape(tens)
return
# No cache hit, perform the compression
assert tens.quantization is not None
assert tens.quantization.scale_f32 is not None
assert tens.quantization.zero_point is not None
zero_point = tens.quantization.zero_point
quant_buf = tens.quant_values.astype(np.int64)
# Early zero-point correction
weights = quant_buf - zero_point
if len(weights.shape) == 2:
weights = np.expand_dims(np.expand_dims(weights, axis=0), axis=0)
compression_scales = []
compressed_offsets = []
encoded_streams = []
encoded_streams_substream_offsets = []
offset = 0
max_single_buffer_len = 0
ifm_bitdepth = tens.consumer_list[0].inputs[0].dtype.size_in_bits()
ifm_depth = weights.shape[-2]
if npu_block_type == NpuBlockType.ConvolutionDepthWise:
tens.block_traversal = TensorBlockTraversal.DepthWise
if npu_block_type == NpuBlockType.ConvolutionMxN:
# Determine which block traversal strategy has better DPU utilization
kernel_size = weights.shape[0] * weights.shape[1]
depth_utilization = weights.shape[2] / round_up(weights.shape[2], 32 if ifm_bitdepth == 8 else 16)
part_kernel_utilization = (weights.shape[2] / round_up(weights.shape[2], 8)) * (
kernel_size / round_up(kernel_size, 4 if ifm_bitdepth == 8 else 2)
)
if part_kernel_utilization >= depth_utilization or ifm_depth <= 8:
# Part-kernel first is always better for ifm depths <= 8
tens.block_traversal = TensorBlockTraversal.PartKernelFirst
else:
tens.block_traversal = TensorBlockTraversal.DepthFirst
is_depthwise = tens.block_traversal == TensorBlockTraversal.DepthWise
if tens.block_traversal == TensorBlockTraversal.PartKernelFirst:
block_traversal = NpuBlockTraversal.PART_KERNEL_FIRST
else:
block_traversal = NpuBlockTraversal.DEPTH_FIRST
if tens.consumer_list[0].type == Op.Conv2DBackpropInputSwitchedBias:
# Transpose Convolution: reverse the weights along the H and W axes
weights = np.flip(weights, axis=(0, 1))
# Calculate brick size
brick_size = (weights.shape[0], weights.shape[1], weights.shape[2], min(tens.shape[-1], ofm_depth_step))
elements_in_brick = np.prod(brick_size)
# Slice weight stream up depth-ways into bricks and compress
full_ofm_depth = quant_buf.shape[-1]
for idx in range(0, full_ofm_depth, ofm_depth_step):
# Get the weights necessary for this brick
count = min(full_ofm_depth - idx, ofm_depth_step)
brick_weights = weights[:, :, :, idx : idx + count]
substream_offsets = [0]
encoded_stream = []
# For each core, deinterleave weights from the larger volume
# and generate separate compressed streams.
for core in range(0, min(arch.ncores, full_ofm_depth)):
core_weights = core_deinterleave(brick_weights, core, arch.ncores)
block_depth = (ofm_block_depth + arch.ncores - 1 - core) // arch.ncores
encoded_substream = []
if block_depth != 0:
encoded_substream = encode_weights(
accelerator=arch.accelerator_config,
weights_volume=core_weights,
dilation_xy=dilation,
ifm_bitdepth=ifm_bitdepth,
ofm_block_depth=block_depth,
is_depthwise=is_depthwise,
block_traversal=block_traversal,
)
encoded_stream.extend(encoded_substream)
substream_offsets.append(len(encoded_stream))
encoded_streams.append(encoded_stream)
encoded_streams_substream_offsets.append(substream_offsets)
# Remember maximum encoded length for DoubleBuffering
max_single_buffer_len = max(max_single_buffer_len, len(encoded_stream))
# Remember where we put it for linear addressing
compressed_offsets.append(offset)
offset += len(encoded_stream)
assert offset % 16 == 0
# Compression scale tracking
compression_scales.append(len(encoded_stream) / elements_in_brick)
# Track total length as last element of the offsets array
compressed_offsets.append(offset)
tens.weight_compression_scales = compression_scales
tens.weight_compressed_offsets = compressed_offsets
tens.compression_scale_for_worst_weight_stream = np.amax(compression_scales)
import numpy
from numpy.linalg import norm
from scipy.fft import dct
from skimage.metrics import mean_squared_error
def spectral_psnr(norm_true_image, norm_test_image):
"""Spectral PSNR calculation
Parameters
----------
norm_true_image : numpy.typing.ArrayLike
norm_test_image : numpy.typing.ArrayLike
Returns
-------
Calculated PSNR : float
Notes
-----
Interesting package: https://github.com/andrewekhalel/sewar
"""
norm_true_image = norm_true_image / norm(norm_true_image.flatten(), 2)
norm_test_image = norm_test_image / norm(norm_test_image.flatten(), 2)
dct_norm_true_image = dct(
dct(norm_true_image, axis=0, workers=-1), axis=1, workers=-1
)
dct_norm_test_image = dct(
dct(norm_test_image, axis=0, workers=-1), axis=1, workers=-1
)
norm_dct_norm_true_image = dct_norm_true_image / norm(
dct_norm_true_image.flatten(), 2
)
norm_dct_norm_test_image = dct_norm_test_image / norm(
dct_norm_test_image.flatten(), 2
)
norm_true_image = abs(norm_dct_norm_true_image)
norm_test_image = abs(norm_dct_norm_test_image)
err = mean_squared_error(norm_true_image, norm_test_image)
return 10 * numpy.log10(1 / err)
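# Usage sketch (arrays are placeholders): both inputs are expected to be 2-D images.
#   img_a = numpy.random.rand(64, 64)
#   img_b = img_a + 0.01 * numpy.random.rand(64, 64)
#   spectral_psnr(img_a, img_b)
# The result is 10 * log10(1 / MSE) of the normalised DCT magnitudes, so higher
# values mean closer spectra.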
def spectral_mutual_information(image_a, image_b, normalised: bool = True):
"""Spectral mutual information
Parameters
----------
image_a : numpy.typing.ArrayLike
image_b : numpy.typing.ArrayLike
normalised : bool
Returns
-------
mutual_information
"""
norm_image_a = image_a / norm(image_a.flatten(), 2)
norm_image_b = image_b / norm(image_b.flatten(), 2)
dct_norm_true_image = dct(dct(norm_image_a, axis=0, workers=-1), axis=1, workers=-1)
dct_norm_test_image = dct(dct(norm_image_b, axis=0, workers=-1), axis=1, workers=-1)
return mutual_information(
dct_norm_true_image, dct_norm_test_image, normalised=normalised
)
def joint_information(image_a, image_b, bins: int = 256):
"""Joint information
Parameters
----------
image_a : numpy.typing.ArrayLike
image_b : numpy.typing.ArrayLike
bins : int
Returns
-------
joint information
"""
image_a = image_a.flatten()
image_b = image_b.flatten()
c_xy = numpy.histogram2d(image_a, image_b, bins)[0]
ji = joint_entropy_from_contingency(c_xy)
return ji
def mutual_information(image_a, image_b, bins: int = 256, normalised: bool = True):
"""Mutual information
Parameters
----------
image_a : numpy.typing.ArrayLike
image_b : numpy.typing.ArrayLike
bins : int
normalised : bool
Returns
-------
mutual information
"""
image_a = image_a.flatten()
image_b = image_b.flatten()
c_xy = numpy.histogram2d(image_a, image_b, bins)[0]
mi = mutual_info_from_contingency(c_xy)
mi = mi / joint_entropy_from_contingency(c_xy) if normalised else mi
return mi
def joint_entropy_from_contingency(contingency):
"""Joint entropy from contingency
Parameters
----------
contingency : numpy.typing.ArrayLike
Returns
-------
Joint entropy from contingency
"""
# coordinates of non-zero entries in the contingency table:
nzx, nzy = numpy.nonzero(contingency)
# non zero values:
nz_val = contingency[nzx, nzy]
# sum of all values in the contingency table:
contingency_sum = contingency.sum()
# normalised contingency, i.e. probability:
p = nz_val / contingency_sum
# log contingency:
log_p = numpy.log2(p)
# Joint entropy:
joint_entropy = -p * log_p
return joint_entropy.sum()
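# Worked example: a contingency table [[2, 0], [0, 2]] has non-zero probabilities
# p = [0.5, 0.5], so the joint entropy is -2 * 0.5 * log2(0.5) = 1.0 bit.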
def mutual_info_from_contingency(contingency):
"""Mutual info from contingency
Parameters
----------
contingency : numpy.typing.ArrayLike
Returns
-------
Mutual info from contingency
"""
# coordinates of non-zero entries in the contingency table:
nzx, nzy = numpy.nonzero(contingency)
#!/usr/bin/env python
#===========================================================================
#
# Produce plots for sun scan analysis
#
#===========================================================================
from __future__ import print_function
import os
import sys
import subprocess
from optparse import OptionParser
import numpy as np
import numpy.ma as ma
from numpy import convolve
import matplotlib.pyplot as plt
from matplotlib import dates
import math
import datetime
import contextlib
def main():
# globals
global options
global debug
# parse the command line
usage = "usage: %prog [options]"
parser = OptionParser(usage)
parser.add_option('--debug',
dest='debug', default=False,
action="store_true",
help='Set debugging on')
parser.add_option('--verbose',
dest='verbose', default=False,
action="store_true",
help='Set verbose debugging on')
parser.add_option('--sm_file',
dest='smFilePath',
default='../data/pecan/sun_mon.txt',
help='RadxSunMon results file path')
parser.add_option('--flux_file',
dest='fluxFilePath',
default='../data/pecan/fluxtable.txt',
help='Solar flux values from Penticton')
parser.add_option('--title',
dest='title',
default='SUN SCAN ANALYSIS - PECAN',
help='Title for plot')
parser.add_option('--width',
dest='figWidthMm',
default=400,
help='Width of figure in mm')
parser.add_option('--height',
dest='figHeightMm',
default=320,
help='Height of figure in mm')
parser.add_option('--meanLen',
dest='meanLen',
default=15,
help='Len of moving mean filter')
(options, args) = parser.parse_args()
if (options.verbose == True):
options.debug = True
if (options.debug == True):
print("Running %prog", file=sys.stderr)
print(" smFilePath: ", options.smFilePath, file=sys.stderr)
print(" fluxFilePath: ", options.fluxFilePath, file=sys.stderr)
# read in column headers for sunscan results
iret, smHdrs, smData = readColumnHeaders(options.smFilePath)
if (iret != 0):
sys.exit(-1)
# read in data for sunscan results
(smTimes, smData) = readInputData(options.smFilePath, smHdrs, smData)
# read in flux table data
(fluxTimes, fluxData) = readFluxData(options.fluxFilePath);
# render the plot
doPlot(smTimes, smData, fluxTimes, fluxData)
sys.exit(0)
########################################################################
# Read columm headers for the data
# this is in the first line
def readColumnHeaders(filePath):
colHeaders = []
colData = {}
fp = open(filePath, 'r')
line = fp.readline()
fp.close()
commentIndex = line.find("#")
if (commentIndex == 0):
# header
colHeaders = line.lstrip("# ").rstrip("\n").split()
if (options.debug == True):
print("colHeaders: ", colHeaders, file=sys.stderr)
else:
print("ERROR - readColumnHeaders", file=sys.stderr)
print(" First line does not start with #", file=sys.stderr)
return -1, colHeaders, colData
for index, var in enumerate(colHeaders, start=0):
colData[var] = []
return 0, colHeaders, colData
########################################################################
# Read in the data
def readInputData(filePath, colHeaders, colData):
# open file
fp = open(filePath, 'r')
lines = fp.readlines()
fp.close()
# read in a line at a time, set colData
for line in lines:
commentIndex = line.find("#")
if (commentIndex >= 0):
continue
# data
data = line.strip().split()
for index, var in enumerate(colHeaders, start=0):
if (var == 'count' or \
var == 'year' or var == 'month' or var == 'day' or \
var == 'hour' or var == 'min' or var == 'sec' or \
var == 'nBeamsNoise'):
colData[var].append(int(data[index]))
else:
if (isNumber(data[index])):
colData[var].append(float(data[index]))
else:
colData[var].append(data[index])
# load observation times array
year = colData['year']
month = colData['month']
day = colData['day']
hour = colData['hour']
minute = colData['min']
sec = colData['sec']
obsTimes = []
for ii, var in enumerate(year, start=0):
thisTime = datetime.datetime(year[ii], month[ii], day[ii],
hour[ii], minute[ii], sec[ii])
obsTimes.append(thisTime)
return obsTimes, colData
########################################################################
# Read in flux data
def readFluxData(filePath):
# open file
fp = open(filePath, 'r')
lines = fp.readlines()
fp.close()
# read in column headers from line 0
colHeaders = []
colHeaders = lines[0].lstrip(" ").rstrip("\n").split()
if (options.debug == True):
print("colHeaders: ", colHeaders, file=sys.stderr)
# read in a line at a time, set colData
colData = {}
for index, var in enumerate(colHeaders, start=0):
colData[var] = []
for line in lines:
if (line.find("flux") >= 0):
continue
if (line.find("----") >= 0):
continue
if (line.find("#") >= 0):
continue
# data
data = line.strip().split()
for index, var in enumerate(colHeaders, start=0):
if (var == 'fluxdate' or var == 'fluxtime'):
colData[var].append(data[index])
else:
if (isNumber(data[index])):
colData[var].append(float(data[index]))
# load observation times array
fdate = colData['fluxdate']
ftime = colData['fluxtime']
obsTimes = []
for ii, var in enumerate(fdate, start=0):
year = fdate[ii][0:4]
month = fdate[ii][4:6]
day = fdate[ii][6:8]
hour = ftime[ii][0:2]
minute = ftime[ii][2:4]
sec = ftime[ii][4:6]
thisTime = datetime.datetime(int(year), int(month), int(day),
int(hour), int(minute), int(sec))
obsTimes.append(thisTime)
return obsTimes, colData
########################################################################
# Check is a number
def isNumber(s):
try:
float(s)
return True
except ValueError:
return False
########################################################################
# Moving average filter
def movingAverage(values, filtLen):
weights = np.repeat(1.0, filtLen)/filtLen
sma = np.convolve(values, weights, 'valid')
smaList = sma.tolist()
for ii in range(0, filtLen // 2):
smaList.insert(0, smaList[0])
smaList.append(smaList[-1])
return np.array(smaList).astype(np.double)
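# --- Illustrative sketch (added) ---
# Hedged usage example for movingAverage: with the edge padding above, the
# smoothed output keeps the length of the input. The signal below is invented.
def _example_moving_average():
    values = np.arange(10, dtype=np.double)
    smoothed = movingAverage(values, 5)
    return values.shape == smoothed.shape  # expected: True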
########################################################################
# Plot
def doPlot(smTimes, smData, fluxTimes, fluxData):
# sunscan times
smtimes = np.array(smTimes).astype(datetime.datetime)
fileName = options.smFilePath
titleStr = "File: " + fileName
hfmt = dates.DateFormatter('%y/%m/%d')
# sun angle offset - only use values < max valid
maxValidOffset = 0.40
angleOffset = np.array(smData["angleOffset"]).astype(np.double)
validAngleOffset = (np.isfinite(angleOffset) & \
(angleOffset < maxValidOffset))
meanAngleOffset = np.mean(angleOffset[validAngleOffset])
elOffset = np.array(smData["elOffset"]).astype(np.double)
validElOffset = (np.isfinite(elOffset) & \
(angleOffset < maxValidOffset))
meanElOffset = np.mean(elOffset[validElOffset])
azOffset = np.array(smData["azOffset"]).astype(np.double)
validAzOffset = (np.isfinite(azOffset) & \
(angleOffset < maxValidOffset))
meanAzOffset = np.mean(azOffset[validAzOffset])
np.set_printoptions(precision=3)
import os, sys, logging
from Tkinter import *
import functools
import numpy as np
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '../..'))
from tmlib.lda import LdaModel
from tmlib.datasets import utilizies
from tmlib.datasets.utilizies import DataFormat
from database import DataBase
def calc_similar(vector_i, vector_j, thresh):
indexs = np.where(vector_i <= thresh)[0]
vector_i[indexs] = 0
result = list()
for vector in vector_j:
indexs = np.where(vector <= thresh)[0]
vector[indexs] = 0
indexs = np.where(vector*vector_i > thresh)[0]
diff = np.fabs(np.log(vector_i[indexs]) - np.log(vector[indexs]))
result.append(np.sum(diff))
return np.array(result)
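# --- Illustrative sketch (added) ---
# Hedged example of calc_similar: the topic distributions below are invented.
# Note the function zeroes entries in place, so copies are passed in. It returns
# one dissimilarity score per row of the second argument.
def _example_calc_similar():
    vector_i = np.array([0.5, 0.3, 0.2])
    others = np.array([[0.4, 0.4, 0.2], [0.2, 0.3, 0.5]])
    return calc_similar(vector_i.copy(), others.copy(), thresh=0.1)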
def frac(n):
if n < 0:
n += 2*np.pi
return 360 * n / (2*np.pi)
def is_in_arc(x, y, centerx, centery, R, angle0, angle1):
if (x-centerx)**2 + (y-centery)**2 > R**2:
return False
theta = - np.arctan2(y-centery, x-centerx)
return angle0 <= frac(theta) <= angle1
def get_arc(x, y, origin_coord, R, list_angle):
for i in range(len(list_angle)):
angle0, angle1 = list_angle[i]
if is_in_arc(x, y, origin_coord[0], origin_coord[1], R, angle0, angle1):
return i
return None
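# --- Illustrative sketch (added) ---
# Hedged example of get_arc with made-up geometry: a point one unit to the
# right of the origin lies at 0 degrees, so it falls into the single arc
# spanning [0, 90] degrees with radius 2 (arc index 0).
def _example_get_arc():
    return get_arc(x=1.0, y=0.0, origin_coord=(0.0, 0.0), R=2.0, list_angle=[(0, 90)])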
class TopicPage(Frame):
def __init__(self, root, model, db, vocab_file):
Frame.__init__(self, root)
self.top_words = model.print_top_words(20, vocab_file)
self.db = db
self.num_topics = len(self.top_words)
self.model = model
self.root = root
self.presence_score = model.presence_score
self.button = None
self.populate(self.presence_score)
def template(self):
self.parent = Canvas(self.root, width=1000, height=600, borderwidth=0, background="#D5DFF5")
self.frame = Frame(self.parent, background="#D5DFF5")
self.scroll = Scrollbar(self.root, orient="vertical", command=self.parent.yview)
self.parent.configure(yscrollcommand=self.scroll.set)
self.scroll.grid(row=1, column=1, sticky='nsew') # pack(side="right", fill="y")
self.parent.grid(row=1, column=0, sticky='nsew') # pack(side="left", fill="both", expand=True)
self.window = self.parent.create_window((80, 10), window=self.frame,
anchor="nw", tags="self.frame")
self.frame.bind("<Configure>", self.onFrameConfigure)
def hover(self, color, event):
canvas = event.widget
canvas.configure(background=color)
def populate(self, presence_score):
for widget in self.root.winfo_children():
widget.destroy()
self.template()
if self.button is not None:
self.button.destroy()
index_sorted = np.argsort(presence_score)
import numpy as np
from fractions import Fraction as Q
eps = 10e-4
def prvect(v):
print("( ", end="")
for i in v:
print(format(float(i), ".4f"), end=" ")
print(")")
def prmatr(m):
print("[")
for i in m:
prvect(i)
print("]")
def swap_rows(A, x, y):
if x == y: return A
temp = A[x].copy()
A[x] = A[y]
A[y] = temp
return(A)
def make_m(main, ind):
m = np.eye(3, dtype=Q)
temp = main[ind, ind]
for i in range(ind, 3):
m[i, ind] = - main[i, ind] / temp if i != ind else 1 / temp
return m
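# --- Illustrative sketch (added) ---
# Hedged example of the elimination step encoded by make_m, using an invented
# 3x4 system and assuming the Fraction-backed arrays behave as in the code above:
# multiplying make_m(A, 0) into A scales the pivot row and zeroes column 0
# below the pivot.
def _example_make_m():
    A = make_rational(np.array([[2, 1, 0, 1],
                                [4, 3, 1, 2],
                                [6, 5, 2, 3]]))
    m0 = make_m(A, 0)
    return np.dot(m0, A)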
def make_rational(a):
temp = np.zeros(shape=(3, 4), dtype=Q)
for i in range(0, 3):
for j in range(0, 4):
temp[i, j] = Q(a[i, j])
return temp
def count_cordinality_num(m):
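# Despite the unused `m` parameter, this returns the infinity-norm condition
# number of the global raw_main_matrix: ||A^-1||_inf * ||A||_inf.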
# det = m[0, 0] * m[1, 1] * m[2, 2] + m[2, 0] * m[0, 1] * m[1, 2] + m[0, 2] * m[1, 0] * m[2, 1] - m[0, 2] * m[1, 1] * m[2, 0] - m[1, 0] * m[0, 1] * m[2, 2] - m[2, 1] * m[1, 2] * m[0, 0]
inv = np.linalg.inv(raw_main_matrix)
return np.linalg.norm(inv, np.inf) * np.linalg.norm(raw_main_matrix, np.inf)
import numpy as np
from scipy.linalg import hilbert,lu
n = 10
H = hilbert(10)
#------------------------Q1-----------------------------
L = np.array([[0. for i in range(n)] for i in range(n)])
for i in range(0,n):
L[i][i] = 1.
U = np.array([[0. for i in range(0,n)] for i in range(n)])
for i in range(0,n):
for j in range(0,n):
U[i][j] = H[i][j]
for i in range(n):
for j in range(i+1,n):
e = np.divide(U[j][i],U[i][i])
L[j][i] = e
to = np.array(U[i])
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from pathlib import Path
import logging
import numpy as np
from scipy import signal
from scipy.io import wavfile
from ibllib import dsp
import ibllib.io.raw_data_loaders as ioraw
from ibllib.io.extractors.training_trials import GoCueTimes
logger_ = logging.getLogger('ibllib')
NS_WIN = 2 ** 18 # 2 ** np.ceil(np.log2(1 * fs))
OVERLAP = NS_WIN / 2
NS_WELCH = 512
FTONE = 5000
UNIT = 'dBFS' # dBFS or dbSPL
READY_TONE_SPL = 85
def _running_mean(x, N):
cumsum = np.cumsum(np.insert(x, 0, 0))
return (cumsum[N:] - cumsum[:-N]) / N
def _detect_ready_tone(w, fs):
# get envelope of DC free signal and envelope of BP signal around freq of interest
h = np.abs(signal.hilbert(w - np.median(w)))
fh = np.abs(signal.hilbert(dsp.bp(w, si=1 / fs, b=FTONE * np.array([0.9, 0.95, 1.15, 1.1]))))
dtect = _running_mean(fh / (h + 1e-3), int(fs * 0.1)) > 0.8
return np.where(np.diff(dtect.astype(int)) == 1)[0]
# tone = np.sin(2 * np.pi * FTONE * np.arange(0, fs * 0.1) / fs)
# tone = tone / np.sum(tone ** 2)
# xc = np.abs(signal.hilbert(signal.correlate(w - np.mean(w), tone)))
def _get_conversion_factor(unit=UNIT, ready_tone_spl=READY_TONE_SPL):
# 3 approaches here (not exclusive):
# a- get the mic sensitivity, the preamp gain and DAC parameters and do the math
# b- treat the whole thing as a black box and do a calibration run (cf. people at Renard's lab)
# c- use calibrated ready tone
# The reference of acoustic pressure is 0dBSPL @ 1kHz which is threshold of hearing (20 μPa).
# Usual calibration is 1 Pa (94 dBSPL) at 1 kHz
# c) here we know that the ready tone is 55dB SPL at 5kHz, assuming a flat spectrum between
# 1 and 5 kHz, and observing the peak value on the 5k at the microphone.
if unit == 'dBFS':
return 1.0
distance_to_the_mic = .155
peak_value_observed = 60
rms_value_observed = np.sqrt(2) / 2 * peak_value_observed
fac = 10 ** ((ready_tone_spl - 20 * np.log10(rms_value_observed)) / 20) * distance_to_the_mic
return fac
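# --- Illustrative sketch (added) ---
# Hedged example of how the conversion factor is intended to be used: scale a
# made-up waveform, then express its RMS level in dB relative to the chosen unit.
def _example_level_in_db(fs=44100):
    t = np.arange(0, int(0.1 * fs)) / fs
    w = 0.1 * np.sin(2 * np.pi * FTONE * t)
    w_scaled = w * _get_conversion_factor(unit=UNIT)
    rms = np.sqrt(np.mean(w_scaled ** 2))
    return 20 * np.log10(rms)  # roughly -23 dBFS for this synthetic tone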
def welchogram(fs, wav, nswin=NS_WIN, overlap=OVERLAP, nperseg=NS_WELCH):
"""
Computes a spectrogram on a very large audio file.
:param fs: sampling frequency (Hz)
:param wav: wav signal (vector or memmap)
:param nswin: n samples of the sliding window
:param overlap: n samples of the overlap between windows
:param nperseg: n samples for the computation of the spectrogram
:return: tscale, fscale, downsampled_spectrogram
"""
ns = wav.shape[0]
window_generator = dsp.WindowGenerator(ns=ns, nswin=nswin, overlap=overlap)
nwin = window_generator.nwin
fscale = dsp.fscale(nperseg, 1 / fs, one_sided=True)
W = np.zeros((nwin, len(fscale)))
tscale = window_generator.tscale(fs=fs)
detect = []
for first, last in window_generator.firstlast:
# load the current window into memory
w = np.float64(wav[first:last]) * _get_conversion_factor()
# detection of ready tones
a = [d + first for d in _detect_ready_tone(w, fs)]
if len(a):
detect += a
# the last window may not allow a pwelch
if (last - first) < nperseg:
continue
# compute PSD estimate for the current window
iw = window_generator.iw
_, W[iw, :] = signal.welch(w, fs=fs, window='hanning', nperseg=nperseg, axis=-1,
detrend='constant', return_onesided=True, scaling='density')
if (iw % 50) == 0:
window_generator.print_progress()
window_generator.print_progress()
# the onset detection may have duplicates with sliding window, average them and remove
detect = np.sort(np.array(detect))
# linker.py
# All manner of helpers related to computing the function
# which links our particular surrogate's excess risk to the
# excess classification risk incurred.
import numpy as np
import math
import helpers as hlp
import getroots as gtrt
def cube(x, a, b, c, d):
'''
Return the value of a cubic polynomial.
Can vectorize with respect to x or the weights,
but not both of course.
'''
return a*x**3 + b*x**2 + c*x + d
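# --- Illustrative sketch (added) ---
# Hedged usage example of cube(): with scalar coefficients it broadcasts over x,
# which is the vectorisation mentioned in the docstring. The weights are invented.
def _example_cube():
    x = np.linspace(-1.0, 1.0, 5)
    return cube(x, a=1.0, b=0.0, c=-6.0, d=0.0)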
# Coefficients that specify the "plus" version of our single cube condition.
def coef_onecubeplus_A(gam, eta):
return np.zeros(eta.shape) + 1.0
def coef_onecubeplus_B(gam, eta):
return np.zeros(eta.shape) + 3*gam
def coef_onecubeplus_C(gam, eta):
return np.zeros(eta.shape) + 3*gam**2 - 6
def coef_onecubeplus_D(gam, eta):
alpha = (eta-1)/eta
return -6 * ( hlp.psi_catnew(math.sqrt(2))/alpha + gam - gam**3/6 )
# Coefficients that specify the "minus" version of our single cube condition.
def coef_onecubeminus_A(gam, eta):
return np.zeros(eta.shape) + 1.0
def coef_onecubeminus_B(gam, eta):
return np.zeros(eta.shape) - 3*gam
def coef_onecubeminus_C(gam, eta):
return np.zeros(eta.shape) + 3*gam**2 - 6
def coef_onecubeminus_D(gam, eta):
alpha = (eta-1)/eta
return 6 * ( hlp.psi_catnew(math.sqrt(2))*alpha + gam - gam**3/6 )
def coef_twocube_A(gam, eta):
alpha = (eta-1)/eta
return (alpha-1)
def coef_twocube_B(gam, eta):
alpha = (eta-1)/eta
return -3*gam*(1+alpha)
def coef_twocube_C(gam, eta):
alpha = (eta-1)/eta
return (1-alpha)*(3*gam**2 - 6)
def coef_twocube_D(gam, eta):
alpha = (eta-1)/eta
return (1+alpha)*(6*gam-gam**3)
def lossfn(u, gam):
'''
The surrogate loss function induced by rho(gamma-u).
'''
return hlp.rho_catnew((gam-u))
def condPhiRisk(u, eta, gam):
return eta * lossfn(u=u, gam=gam) + (1-eta) * lossfn(u=(-u), gam=gam)
def Hfn_single_large(gam, eta):
deltaval = math.fabs((math.sqrt(2)-gam))
myA = coef_onecubeminus_A(gam=gam, eta=eta)
myB = coef_onecubeminus_B(gam=gam, eta=eta)
myC = coef_onecubeminus_C(gam=gam, eta=eta)
myD = coef_onecubeminus_D(gam=gam, eta=eta)
roots = np.zeros(eta.shape)
for t in range(roots.size):
etaval = eta[t]
rootval = gtrt.getroot(a=myA[t], b=myB[t], c=myC[t], d=myD[t])
rootval = rootval[(np.sign(rootval) == math.copysign(1.0,(etaval-1/2)))]
rootval = rootval[np.abs(rootval) >= deltaval]
rootval = rootval[np.abs(rootval) <= gam]
roots[t] = rootval[0] # should be just one left.
Hvals = condPhiRisk(u=roots, eta=eta, gam=gam)
return Hvals
def Hfn_single_small(gam, eta):
deltaval = math.fabs((math.sqrt(2)-gam))
myA = coef_onecubeplus_A(gam=gam, eta=eta)
myB = coef_onecubeplus_B(gam=gam, eta=eta)
myC = coef_onecubeplus_C(gam=gam, eta=eta)
myD = coef_onecubeplus_D(gam=gam, eta=eta)
roots = np.zeros(eta.shape)
for t in range(roots.size):
etaval = eta[t]
rootval = gtrt.getroot(a=myA[t], b=myB[t], c=myC[t], d=myD[t])
rootval = rootval[(np.sign(rootval) == math.copysign(1.0,(etaval-1/2)))]
rootval = rootval[np.abs(rootval) >= deltaval]
rootval = rootval[np.abs(rootval) <= gam]
roots[t] = rootval[0] # should be just one left.
Hvals = condPhiRisk(u=roots, eta=eta, gam=gam)
return Hvals
def Hfn_double(gam, eta):
myA = coef_twocube_A(gam=gam, eta=eta)
myB = coef_twocube_B(gam=gam, eta=eta)
myC = coef_twocube_C(gam=gam, eta=eta)
myD = coef_twocube_D(gam=gam, eta=eta)
roots_list = [ gtrt.getroot(a=myA[t], b=myB[t], c=myC[t], d=myD[t])[0] for t in range(myA.size) ]
roots = np.array(roots_list)
Hvals = condPhiRisk(u=roots, eta=eta, gam=gam)
return Hvals
def Hfn(eta, gam):
out = np.zeros(eta.shape)
deltaval = math.fabs((gam-math.sqrt(2)))
idx_zero = np.nonzero(eta == 0)[0]
idx_one = np.nonzero(eta == 1)[0]
idx_half = np.nonzero(eta == 1/2)[0]
idx_small = np.setdiff1d(np.nonzero(eta < 1/2)[0], idx_zero)
idx_large = np.setdiff1d(np.nonzero(eta > 1/2)[0], idx_one)
import logging
import cv2
import numpy as np
import pytesseract
import os
import time
import json
import re
from multiprocessing import Pool
from Levenshtein import distance
from .input_handler import InputHandler
from .grabscreen import grab_screen
from .utils import get_config, filter_mod
# This is the position of the inventory as a fraction of the resolution
OWN_INVENTORY_ORIGIN = (0.6769531, 0.567361)
# These are the sockets positions as measured on 2560x1440 resolution
# with X_SCALE and Y_SCALE applied, i.e., scale * SOCKETS[i] is the i:th
# sockets absolute pixel position with origin in the middle of the skill tree
# I think the SCALE variables are in fact useless and a relics from the
# positions initially being measured at a view which wasn't zoomed out maximally
SOCKETS = {
1: (-650.565, -376.013),
2: (648.905, -396.45),
3: (6.3354, 765.658),
4: (-1700.9, 2424.17),
5: (-2800.66, -215.34),
6: (-1435.02, -2635.39),
7: (1855.53, -2360.1),
8: (2835.84, 230.5361),
9: (1225.37, 2625.76),
10: (-120.12471, 5195.44),
11: (-3580.19, 5905.92),
12: (-5395.86, 2120.42),
13: (-6030.95, -115.7007),
14: (-5400.59, -1985.18),
15: (-3035.14, -5400.87),
16: (160.10728, -5196.32),
17: (3382.05, -5195.21),
18: (5730.2, -1625.75),
19: (6465.24, 190.3341),
20: (5542.76, 1690.07),
21: (3322.76, 6090.5),
}
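# --- Illustrative sketch (added) ---
# Hedged example of the coordinate convention described above: scale a socket's
# tree-space position and offset by the screen centre to get absolute pixels on
# a 2560x1440 screen. This is a simplification that assumes the view is centred
# on the tree origin; TreeNavigator._tree_pos_to_xy below additionally subtracts
# the current in-game position and applies px_multiplier for other resolutions.
def _example_socket_to_pixels(socket_id=1, resolution=(2560, 1440)):
    tx, ty = SOCKETS[socket_id]
    return (int(tx * X_SCALE + resolution[0] / 2),
            int(ty * Y_SCALE + resolution[1] / 2))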
# The offsets are specified in the same fashion as SOCKETS and are rough
# guesses which allow us to move to the general area and later refine the
# position of the socket through template matching
SOCKET_MOVE_OFFSET = {
1: (0, 150),
2: (0, 150),
3: (0, 200),
4: (0, 150),
5: (-300, 200),
6: (-100, 150),
7: (-150, 0),
8: (0, -150),
9: (-100, -125),
10: (170, 0),
11: (-400, -900),
12: (0, 300),
13: (400, 200),
14: (-250, -150),
15: (-100, -150),
16: (150, -150),
17: (150, 500), #
18: (-300, 400),
19: (-1000, -150),
20: (-500, 500),
21: (100, -1000),
}
# Scalers for the SOCKETS positions to convert them to 2560x1440 pixel positions
X_SCALE = 0.2
Y_SCALE = 0.2
CIRCLE_EFFECTIVE_RADIUS = 300
IMAGE_FOLDER = "data/images/"
# We're using a lot of template matching and all templates are defined here
# with matching thresholds (scores) and sizes per resolution (a lookup sketch
# follows the TEMPLATES table below)
TEMPLATES = {
"AmbidexterityCluster.png": {
"1440p_size": (34, 34),
"1440p_threshold": 0.95,
"1080p_size": (26, 26),
"1080p_threshold": 0.95,
},
"FreeSpace.png": {
"1440p_size": (41, 41),
"1440p_threshold": 0.98,
"1080p_size": (30, 30),
"1080p_threshold": 0.98,
},
"Notable.png": {
"1440p_size": (30, 30),
"1440p_threshold": 0.89,
"1080p_size": (23, 23),
"1080p_threshold": 0.85,
},
"NotableAllocated.png": {
"1440p_size": (30, 30),
"1440p_threshold": 0.93,
"1080p_size": (23, 23),
"1080p_threshold": 0.90,
},
"Jewel.png": {
"1440p_size": (30, 30),
"1440p_threshold": 0.92,
"1080p_size": (23, 23),
"1080p_threshold": 0.92,
},
"JewelSocketed.png": {
"1440p_size": (30, 30),
"1440p_threshold": 0.9,
"1080p_size": (23, 23),
"1080p_threshold": 0.9,
},
"LargeJewel.png": {
"1440p_size": (39, 39),
"1440p_threshold": 0.9,
"1080p_size": (30, 30),
"1080p_threshold": 0.88,
},
"LargeJewelSocketed.png": {
"1440p_size": (39, 39),
"1440p_threshold": 0.9,
"1080p_size": (30, 30),
"1080p_threshold": 0.88,
},
"Skill.png": {
"1440p_size": (21, 21),
"1440p_threshold": 0.87,
"1080p_size": (15, 15),
"1080p_threshold": 0.91,
},
"SkillAllocated.png": {
"1440p_size": (21, 21),
"1440p_threshold": 0.93,
"1080p_size": (15, 15),
"1080p_threshold": 0.91,
},
}
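# --- Illustrative sketch (added) ---
# Hedged example of the per-resolution template lookup described above: the
# navigator builds a prefix such as "1440p_" from the vertical resolution and
# reads the matching size and threshold for a template.
def _example_template_params(template_name="Notable.png", vertical_resolution=1440):
    prefix = "%dp_" % vertical_resolution
    entry = TEMPLATES[template_name]
    return entry[prefix + "size"], entry[prefix + "threshold"]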
# Defines the position of the text box which is cropped out and OCR'd per node
TXT_BOX = {"x": 32, "y": 0, "w": 900, "h": 320}
mod_files = {
"passives": "data/passives.json",
"passivesAlt": "data/passivesAlternatives.json",
"passivesAdd": "data/passivesAdditions.json",
"passivesVaalAdd": "data/passivesVaalAdditions.json",
}
class TreeNavigator:
def __init__(self, resolution, halt_value):
self.resolution = resolution
self.input_handler = InputHandler(self.resolution)
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(message)s",
datefmt="[%H:%M:%S %d-%m-%Y]",
)
self.log = logging.getLogger("tree_nav")
self.config = get_config("tree_nav")
self.find_mod_value_re = re.compile("(\(?(?:[0-9]*\.?[0-9]-?)+\)?)")
self.nonalpha_re = re.compile("[^a-zA-Z]")
self.origin_pos = (self.resolution[0] / 2, self.resolution[1] / 2)
self.ingame_pos = [0, 0]
self.px_multiplier = self.resolution[0] / 2560
self.resolution_prefix = str(self.resolution[1]) + "p_"
self.templates_and_masks = self.load_templates()
self.passive_mods, self.passive_names = self.generate_good_strings(mod_files)
self.passive_nodes = list(self.passive_mods.keys()) + list(
self.passive_names.keys()
)
self.halt = halt_value
self.first_run = True
def _run(self):
return not bool(self.halt.value)
def eval_jewel(self, item_location):
self.ingame_pos = [0, 0]
item_name, item_desc = self._setup(item_location, copy=True)
pool = Pool(self.config["ocr_threads"])
jobs = {}
if self.first_run:
# We just initiated the module and not sure where we are
# Thus, we better rectify our position estimate before starting
self._refind_position(SOCKETS[1])
self.first_run = False
for socket_id in sorted(SOCKETS.keys()):
if not self._run():
return None, None, None
found_socket = self._move_screen_to_socket(socket_id)
if not found_socket and socket_id == 1:
self.log.info("We are lost - trying to find known location")
# We just initiated the search and have no clue where we are
# Thus, we better rectify our position estimate before starting
self._refind_position(SOCKETS[1])
socket_nodes = self._analyze_nodes(socket_id)
# Convert stats for the socket from image to lines in separate process
self.log.info("Performing asynchronous OCR")
jobs[socket_id] = pool.map_async(OCR.node_to_strings, socket_nodes)
self.log.info("Analyzed socket %s" % socket_id)
# Return to socket 1 to ease next search
self._move_to_tree_pos_using_spaces(SOCKETS[1])
self._setup(item_location)
self.log.info("Waiting for last OCR to finish")
item_stats = [
{
"socket_id": socket_id,
"socket_nodes": self._filter_ocr_lines(
jobs[socket_id].get(timeout=300)
),
}
for socket_id in jobs
]
pool.close()
pool.join()
return item_name, item_desc, item_stats
def load_templates(self, threshold=128):
templates_and_masks = {}
for template_name in TEMPLATES.keys():
template_path = os.path.join(IMAGE_FOLDER, template_name)
img = cv2.imread(template_path, cv2.IMREAD_UNCHANGED)
size = TEMPLATES[template_name][self.resolution_prefix + "size"]
channels = cv2.split(img)
mask = None
if len(channels) > 3:
mask = np.array(channels[3])
mask[mask <= threshold] = 0
mask[mask > threshold] = 255
mask = cv2.resize(mask, size)
img = cv2.imread(template_path, 0)
img = cv2.resize(img, size)
templates_and_masks[template_name] = {"image": img, "mask": mask}
return templates_and_masks
def _move_screen_to_socket(self, socket_id):
self.log.debug("Moving close to socket %s" % socket_id)
move_offset_tx, move_offset_ty = SOCKET_MOVE_OFFSET[socket_id]
move_offset = self._tree_pos_to_xy(
[move_offset_tx, move_offset_ty], offset=True
)
socket_tx, socket_ty = SOCKETS[socket_id]
socket_xy = self._tree_pos_to_xy([socket_tx, socket_ty])
compensation_offset = self._find_socket(socket_xy)
if compensation_offset is None:
found_socket = False
compensation_offset = [0, 0]
else:
found_socket = True
self.log.debug("Compensated navigation with %s" % compensation_offset)
move_to = [
socket_xy[0] + compensation_offset[0] + move_offset[0],
socket_xy[1] + compensation_offset[1] + move_offset[1],
]
x_offset = move_to[0] - self.resolution[0] / 2
y_offset = move_to[1] - self.resolution[1] / 2
self.input_handler.click(
*move_to, *move_to, button=None, raw=True, speed_factor=1
)
self.input_handler.drag(self.origin_pos[0], self.origin_pos[1], speed_factor=1)
self.input_handler.rnd_sleep(min=200, mean=300, sigma=100)
self.ingame_pos = [socket_tx + move_offset_tx, socket_ty + move_offset_ty]
return found_socket
def _refind_position(self, desired_tree_pos):
# If the current location has been determined to be incorrect
# we can go to the bottom right corner and find a cluster close
# to socket 21, namely the Ambidexterity cluster
# This is a known location, which can then be used to calculate
# our way to a desired position
self.log.debug("Centering screen position")
# Correct our tree position to a known value
self._locate_screen_using_ambidexterity()
# Find our way to the desired position
self._move_to_tree_pos_using_spaces(desired_tree_pos)
def _move_to_tree_pos_using_spaces(self, desired_tree_pos, max_position_error=5):
dx = desired_tree_pos[0] - self.ingame_pos[0]
dy = desired_tree_pos[1] - self.ingame_pos[1]
self.log.debug("Moving to tree pos using spaces. Deltas: ({}, {})".format(dx, dy))
while (abs(dx) + abs(dy)) > max_position_error:
# Choose quadrant to find spaces in based on dx, dy
right, bottom = dx >= 0, dy >= 0
if right and not bottom:
quadrant = 0
elif not right and not bottom:
quadrant = 1
elif not right and bottom:
quadrant = 2
elif right and bottom:
quadrant = 3
# Find empty spaces that we can drag from
spaces = self._find_empty_space(quadrant)
if spaces is None:
raise ValueError("Could not find an empty space, quitting.")
# Choose a random empty space for maximum drag
chosen_space = spaces[np.random.randint(spaces.shape[0])]
# How far to drag the window to end up in the optimal place
screen_move_x, screen_move_y = self._tree_pos_to_xy([dx, dy],
offset=True)
# Calculate where our drag should end up to perform the move
drag_x = chosen_space[0] - screen_move_x
drag_y = chosen_space[1] - screen_move_y
# We should only drag within the screen's resolution
# Additionally, we use 100px margin to not trigger tree scroll
drag_x = np.clip(drag_x, 100, self.resolution[0] - 100)
drag_y = np.clip(drag_y, 100, self.resolution[1] - 100)
# Drag
self.input_handler.click(
*chosen_space, *chosen_space, button=None, raw=True, speed_factor=1
)
self.input_handler.drag(drag_x, drag_y, speed_factor=1)
self.input_handler.rnd_sleep(min=200, mean=300, sigma=100)
# Calculate how far we've actually moved
effective_move_x = chosen_space[0] - drag_x
effective_move_y = chosen_space[1] - drag_y
# Update our internal tree position
self.ingame_pos = self._add_xy_offset_to_tree_pos(
[effective_move_x, effective_move_y]
)
# Figure out how much we have left to move
dx = desired_tree_pos[0] - self.ingame_pos[0]
dy = desired_tree_pos[1] - self.ingame_pos[1]
def _locate_screen_using_ambidexterity(self):
# Essentially, this is _move_to_tree_pos_using_spaces but
# only used to find the tree position by navigating to a known point
self.log.debug("Moving to ambidexterity")
ambidexterity_position = None
assumed_ambidexterity_position = (0.25234375, 0.20555556)
while ambidexterity_position is None:
# Find empty spaces that we can drag from
spaces = self._find_empty_space(3)
if spaces is None:
raise ValueError("Could not find an empty space, quitting.")
# Choose the farthest empty space for maximum drag
chosen_space = spaces[np.argmax(spaces.sum(axis=1))]
# An arbitrary position in the top left region
drag_location = (200, 200)
# Drag
self.input_handler.click(
*chosen_space, *chosen_space, button=None, raw=True, speed_factor=1
)
self.input_handler.drag(drag_location[0], drag_location[1], speed_factor=1)
self.input_handler.rnd_sleep(min=200, mean=300, sigma=100)
# Are we there yet?
# i.e., have we reached Ambidexterity, which in that case is at
# roughly (646, 296) in absolute 1440p screen px position
ambidexterity_position = self._find_icon(
assumed_ambidexterity_position, "AmbidexterityCluster.png"
)
# Ambidexterity is located (-560, 850) from socket 21
# Thus, this plus any (scaled) offset found by the template matcher is
# our tree position
self.ingame_pos = [
SOCKETS[21][0]
- 560
+ ambidexterity_position[0] / (X_SCALE * self.px_multiplier),
SOCKETS[21][1]
+ 850
+ ambidexterity_position[1] / (Y_SCALE * self.px_multiplier),
]
def _find_empty_space(self, quadrant):
# Finds empty spaces that can be used to drag the screen
# Used to recenter the screen
# The quadrant argument is an int in [0, 1, 2, 3], corresponding to
# [top-right, top-left, bottom-left, bottom-right]
quadrant_translation = {0: [0.5, 0], 1: [0, 0], 2: [0, 0.5], 3: [0.5, 0.5]}
fractional_lt = quadrant_translation[quadrant]
lt = [
int(fractional_lt[0] * self.resolution[0]),
int(fractional_lt[1] * self.resolution[1]),
]
rb = [int(lt[0] + self.resolution[0] / 2),
int(lt[1] + self.resolution[1] / 2)]
searched_area = grab_screen(tuple(lt + rb))
searched_area = cv2.cvtColor(searched_area, cv2.COLOR_BGR2GRAY)
locations = np.zeros_like(searched_area)
centered_coordinates = self._match_image(searched_area, "FreeSpace.png")
locations[tuple(centered_coordinates)] = 1
rel_space_pos_yx = np.argwhere(locations == 1)
rel_space_pos = rel_space_pos_yx.T[::-1].T
if len(rel_space_pos) == 0:
self.log.warning("Could not find any free spaces in tree!")
return None
screen_space_pos = rel_space_pos + lt
# remove positions that are close to edges as these trigger scroll
screen_space_pos = screen_space_pos[(screen_space_pos[:, 0] > 100) &
(screen_space_pos[:, 1] > 100) &
(screen_space_pos[:, 0] < self.resolution[0] - 100) &
(screen_space_pos[:, 1] < self.resolution[1] - 100)]
return screen_space_pos
def _find_icon(self, assumed_position, icon_name):
# Finds the ambidexterity cluster icon in the region it sits in
# if we are at the bottom-right corner of the tree
# The exact location is used to refine our knowledge of our position
abs_assumed_position = (
assumed_position[0] * self.resolution[0],
assumed_position[1] * self.resolution[1],
)
margin_side = int(0.05 * self.resolution[0])
lt = [
int(abs_assumed_position[0] - margin_side / 2),
int(abs_assumed_position[1] - margin_side / 2),
]
rb = [
int(abs_assumed_position[0] + margin_side / 2),
int(abs_assumed_position[1] + margin_side / 2),
]
searched_area = grab_screen(tuple(lt + rb))
searched_area = cv2.cvtColor(searched_area, cv2.COLOR_BGR2GRAY)
locations = np.zeros((margin_side, margin_side))
centered_coordinates = self._match_image(searched_area, icon_name)
locations[tuple(centered_coordinates)] = 1
rel_icon_pos_yx = np.argwhere(locations == 1)
rel_icon_pos = rel_icon_pos_yx.T[::-1].T
if len(rel_icon_pos) == 0:
return None
icon_offset = [
int(rel_icon_pos[0][0] - margin_side / 2 + abs_assumed_position[0]),
int(rel_icon_pos[0][1] - margin_side / 2 + abs_assumed_position[1]),
]
return icon_offset
def _click_socket(self, socket_pos, insert=True):
self.log.debug("Clicking socket")
xy = socket_pos
lt = [xy[0] - 5 * self.px_multiplier, xy[1] - 5 * self.px_multiplier]
rb = [xy[0] + 5 * self.px_multiplier, xy[1] + 5 * self.px_multiplier]
if insert:
self.input_handler.click(*lt, *rb, button="left", raw=True)
else:
self.input_handler.click(*lt, *rb, button="right", raw=True)
self.input_handler.rnd_sleep(min=200, mean=300)
def _tree_pos_to_xy(self, pos, offset=False):
if offset:
return [
pos[0] * X_SCALE * self.px_multiplier,
pos[1] * Y_SCALE * self.px_multiplier,
]
uncentered_xy = [
(pos[0] - self.ingame_pos[0]) * X_SCALE * self.px_multiplier,
(pos[1] - self.ingame_pos[1]) * Y_SCALE * self.px_multiplier,
]
xy = [
int(uncentered_xy[0] + self.origin_pos[0]),
int(uncentered_xy[1] + self.origin_pos[1]),
]
return xy
def _add_xy_offset_to_tree_pos(self, offset):
tree_pos = [
self.ingame_pos[0] + offset[0] / (X_SCALE * self.px_multiplier),
self.ingame_pos[1] + offset[1] / (Y_SCALE * self.px_multiplier),
]
return tree_pos
def _analyze_nodes(self, socket_id):
self.log.info("Analyzing nodes for socket id %s" % socket_id)
nodes = []
node_locations, socket_pos = self._find_nodes(socket_id)
self.log.debug(
"Found %s nodes for socket id %s" % (len(node_locations), socket_id)
)
self._click_socket(socket_pos)
for location in node_locations:
if not self._run():
return
node_stats = self._get_node_data(location)
node = {
"location": self._socket_offset_pos(socket_pos, location),
"stats": node_stats,
}
nodes.append(node)
self._click_socket(socket_pos, insert=False)
return nodes
def _socket_offset_pos(self, socket_pos, node_location):
circle_radius = CIRCLE_EFFECTIVE_RADIUS * self.px_multiplier
return [
(node_location[0] - socket_pos[0]) / circle_radius,
(node_location[1] - socket_pos[1]) / circle_radius,
]
def _filter_ocr_lines(self, nodes_lines, max_dist=4):
filtered_nodes = []
for node in nodes_lines:
names = []
mods = []
for line in node["stats"]:
filtered_line = self._filter_nonalpha(line)
if len(filtered_line) < 4 or filtered_line == "Unallocated":
continue
if filtered_line in self.passive_names:
names.append(self.passive_names[filtered_line])
elif filtered_line in self.passive_mods:
filtered_mod, value = filter_mod(line, regex=self.nonalpha_re)
new_mod = re.sub(
self.find_mod_value_re,
str(value),
self.passive_mods[filtered_line],
count=1,
)
mods.append(new_mod)
else:
# Sometimes the OCR might return strange results. If so,
# as a last resort, check levenshtein distance to closest
# node. This shouldn't happen often.
best_distance = 99999999999
best_match = None
for possible_mod in self.passive_nodes:
d = distance(filtered_line, possible_mod)
if d < best_distance:
best_distance = d
best_match = possible_mod
if best_distance > max_dist:
continue
if best_match in self.passive_names:
names.append(self.passive_names[best_match])
elif best_match in self.passive_mods:
filtered_mod, value = filter_mod(line, regex=self.nonalpha_re)
new_mod = re.sub(
self.find_mod_value_re,
str(value),
self.passive_mods[best_match],
count=1,
)
mods.append(new_mod)
if mods:
filtered_nodes.append(
{"location": node["location"], "name": names, "mods": mods}
)
return filtered_nodes
def _find_nodes(self, socket_id):
self.input_handler.click(0.5, 0.07, 0.51, 0.083, button=None)
socket_pos = self._tree_pos_to_xy(SOCKETS[socket_id])
socket_offset = self._find_socket(socket_pos)
if socket_offset is None:
found_socket = False
socket_offset = [0, 0]
else:
found_socket = True
self.log.debug("Jewel socket offset correction: %s" % socket_offset)
socket_pos[0] += socket_offset[0]
socket_pos[1] += socket_offset[1]
# Add some margin so that we dont accidentally cut any nodes off
margin = 20 * self.px_multiplier
x1 = int(socket_pos[0] - CIRCLE_EFFECTIVE_RADIUS * self.px_multiplier - margin)
y1 = int(socket_pos[1] - CIRCLE_EFFECTIVE_RADIUS * self.px_multiplier - margin)
x2 = int(x1 + 2 * CIRCLE_EFFECTIVE_RADIUS * self.px_multiplier + 2 * margin)
y2 = int(y1 + 2 * CIRCLE_EFFECTIVE_RADIUS * self.px_multiplier + 2 * margin)
nodes = self._get_node_locations_from_screen((x1, y1, x2, y2))
nodes = self._filter_nodes(nodes, socket_pos)
return nodes, socket_pos
def _find_socket(self, socket_pos, side_len=100):
lt = [int(socket_pos[0] - side_len / 2), int(socket_pos[1] - side_len / 2)]
rb = [lt[0] + side_len, lt[1] + side_len]
socket_area = grab_screen(tuple(lt + rb))
socket_area = cv2.cvtColor(socket_area, cv2.COLOR_BGR2GRAY)
locations = np.zeros((side_len, side_len))
for template_name in [
"Jewel.png",
"JewelSocketed.png",
"LargeJewel.png",
"LargeJewelSocketed.png",
]:
centered_coordinates = self._match_image(socket_area, template_name)
locations[tuple(centered_coordinates)] = 1
rel_node_pos_yx = np.argwhere(locations == 1)
rel_node_pos = rel_node_pos_yx.T[::-1].T
if len(rel_node_pos) == 0:
self.log.warning("Could not find any jewel socket for compensating offset!")
return None
socket_offset = [
int(rel_node_pos[0][0] - side_len / 2),
int(rel_node_pos[0][1] - side_len / 2),
]
return socket_offset
def _filter_nodes(self, nodes, socket_pos, duplicate_min_dist=10):
# filter duplicate nodes
kept_node_indices = [len(nodes) - 1]
z = np.array([[complex(c[0], c[1]) for c in nodes]])
dist_matrix = abs(z.T - z)
for node_idx in range(len(nodes) - 1):
if np.min(dist_matrix[node_idx + 1 :, node_idx]) >= duplicate_min_dist:
kept_node_indices.append(node_idx)
nodes = np.array(nodes)
nodes = nodes[kept_node_indices, :]
# filter nodes outside jewel socket radius
distances_to_socket = np.sqrt(np.sum((nodes - socket_pos) ** 2, axis=1))
nodes = nodes[
distances_to_socket <= CIRCLE_EFFECTIVE_RADIUS * self.px_multiplier
]
return nodes
def _get_node_locations_from_screen(self, box):
jewel_area_bgr = grab_screen(box)
jewel_area_gray = cv2.cvtColor(jewel_area_bgr, cv2.COLOR_BGR2GRAY)
locations = np.zeros((box[2] - box[0], box[3] - box[1]))
for template_name in [
"Notable.png",
"NotableAllocated.png",
"Skill.png",
"SkillAllocated.png",
]:
centered_coordinates = self._match_image(jewel_area_gray, template_name)
locations[tuple(centered_coordinates)] = 1
rel_node_pos_yx = np.argwhere(locations == 1)
# © 2020 Nokia
#
# Licensed under the BSD 3 Clause license
#
# SPDX-License-Identifier: BSD-3-Clause
# ============================================
import copy
import torch
import numpy as np
from codesearch.utils import get_best_device
def pad_sequences(sequences, max_len, padding_symbol ='<pad>'):
max_len_batch = max(len(s) for s in sequences)
max_len = min(max_len, max_len_batch)
sequences = [list(s) for s in sequences]
for s in sequences:
while len(s) < max_len:
s.append(padding_symbol)
while len(s) > max_len:
s.pop(-1)
return sequences
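# --- Illustrative sketch (added) ---
# Hedged usage example of pad_sequences with an invented batch: every sequence
# is right-padded (or truncated) to min(max_len, longest sequence in the batch).
def _example_pad_sequences():
    batch = [["a", "b"], ["a", "b", "c", "d"]]
    return pad_sequences(batch, max_len=3)
    # expected: [['a', 'b', '<pad>'], ['a', 'b', 'c']]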
def extract_mask(padded_sequences, padding_symbol='<pad>'):
batch_size = len(padded_sequences)
seq_len = len(padded_sequences[0])
mask = np.ones(shape=(batch_size, seq_len))
from smpr3d.util import *
from smpr3d.algorithm import *
from smpr3d.setup import *
from smpr3d.torch_imports import *
import os
from timeit import default_timer as time
import numpy as np
D = 4
dx = np.array([0.2, 0.2])
E = 300e3
lam = wavelength(E)
C1_target = np.linspace(0, 100, D, dtype=np.float32)
alpha_rad = 20e-3
q_aperture = alpha_rad / lam
dtype = np.float32
args = Param()
args.beam_threshold_percent = 5
args.max_phase_error = np.pi / 200
args.use_full_smatrix = True
args.uniform_initial_intensity = True
# %% load data
world_size = 1
rank = 0
device = th.device('cuda:0')
C = th.zeros(12, D).to(device)
specimen_thickness_angstrom = 100
r0 = None
a = None
I_target = None
Psi0 = None
y_max, x_max, y_min, x_min = 0, 0, 0, 0
skip_nyquist_positions = 2
dx_scan = dx * skip_nyquist_positions
FOV_simulation = th.as_tensor(np.array([70, 70]) * dx_scan)
scan_shape = th.round(((FOV_simulation / dx_scan))).int() // 2
# create positions
K = int(th.prod(scan_shape))
K_rank = K
pos = th.zeros((K, 2))
ls = np.linspace(0, scan_shape[0] * skip_nyquist_positions, scan_shape[0], endpoint=True, dtype=np.float32)
pos[:, 0] = th.from_numpy(np.repeat(ls, scan_shape[1])) * skip_nyquist_positions
ls = np.linspace(0, scan_shape[1] * skip_nyquist_positions, scan_shape[1], endpoint=True, dtype=np.float32)
pos[:, 1] = th.from_numpy(np.tile(ls.reshape(1, scan_shape[1]), scan_shape[0])) * skip_nyquist_positions
print(f'K = {K}')
r0 = np.tile(pos[None, ...], (D, 1, 1))
r0 = th.from_numpy(r0).to(device)
#%%
C1_model = C[0]
# C1_model[1] = 10
# C1_model[2] = 20
C12a = C[1]
# C12a[:] = 1e-8
C12b = C[2]
# C12b[:] = 1e-8
C21a = C[3]
# C21a[:] = 1e-8
C21b = C[4]
C23a = C[5]
C23b = C[6]
C3 = C[7]
C32a = C[8]
C32b = C[9]
C34a = C[10]
C34b = C[11]
# %% define data-dependent variables
# Fourier space grid on detector
b = 8
s = 4 * b
e = 20*b
Ms = np.linspace(s, e, int((e - s) / b + 1), endpoint=True, dtype=int)
print(Ms)
times = np.zeros(Ms.shape)
Bs = np.zeros(Ms.shape)
MY = MX = Ms[0]
fx, fy = 2, 2
NX, NY = MX * fx, MY * fy
detector_shape = np.array([MY, MX])
"""
suncalc-py is ported from suncalc.js under the BSD-2-Clause license.
Copyright (c) 2014, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list
of conditions and the following disclaimer in the documentation and/or other materials
provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from datetime import datetime
from typing import Iterable, Tuple
import numpy as np
try:
import pandas as pd
except ImportError:
pd = None
# shortcuts for easier to read formulas
PI = np.pi
sin = np.sin
cos = np.cos
tan = np.tan
asin = np.arcsin
atan = np.arctan2
acos = np.arccos
rad = PI / 180
# sun times configuration (angle, morning name, evening name)
DEFAULT_TIMES = [
(-0.833, 'sunrise', 'sunset'),
(-0.3, 'sunrise_end', 'sunset_start'),
(-6, 'dawn', 'dusk'),
(-12, 'nautical_dawn', 'nautical_dusk'),
(-18, 'night_end', 'night'),
(6, 'golden_hour_end', 'golden_hour')
] # yapf: disable
# date/time constants and conversions
dayMs = 1000 * 60 * 60 * 24
J1970 = 2440588
J2000 = 2451545
def to_milliseconds(date):
# datetime.datetime
if isinstance(date, datetime):
return date.timestamp() * 1000
# Pandas series of Pandas datetime objects
if pd and pd.api.types.is_datetime64_any_dtype(date):
# A datetime-like series coerced to int is (always?) in nanoseconds
return date.astype(int) / 10 ** 6
# Single pandas Timestamp
if pd and isinstance(date, pd.Timestamp):
date = date.to_numpy()
# Numpy datetime64
if np.issubdtype(date.dtype, np.datetime64):
return date.astype('datetime64[ms]').astype('int')
# Last-ditch effort
if pd:
return np.array(pd.to_datetime(date).astype(int) / 10 ** 6)
raise ValueError(f'Unknown date type: {type(date)}')
def to_julian(date):
return to_milliseconds(date) / dayMs - 0.5 + J1970
def from_julian(j):
ms_date = (j + 0.5 - J1970) * dayMs
if pd:
# If a single value, coerce to a pd.Timestamp
if np.prod(np.array(ms_date).shape) == 1:
return pd.to_datetime(ms_date, unit='ms')
# .astype(datetime) is much faster than pd.to_datetime but it only works
# on series of dates, not on a single pd.Timestamp, so I fall back to
# pd.to_datetime for that.
try:
return (pd.Series(ms_date) * 1e6).astype('datetime64[ns, UTC]')
except TypeError:
return pd.to_datetime(ms_date, unit='ms')
# ms_date could be iterable
try:
return np.array([datetime.utcfromtimestamp(x / 1000) for x in ms_date])
except TypeError:
return datetime.utcfromtimestamp(ms_date / 1000)
def to_days(date):
return to_julian(date) - J2000
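# --- Illustrative sketch (added) ---
# Hedged sanity check of the Julian-day helpers: J2000 is 2000-01-01 12:00 UTC,
# so to_days() of that instant should be approximately 0.
def _example_to_days():
    from datetime import timezone
    return to_days(datetime(2000, 1, 1, 12, 0, 0, tzinfo=timezone.utc))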
# general calculations for position
# obliquity of the Earth
e = rad * 23.4397
def right_ascension(l, b):
return atan(sin(l) * cos(e) - tan(b) * sin(e), cos(l))
def declination(l, b):
return asin(sin(b) * cos(e) + cos(b) * sin(e) * sin(l))
def azimuth(H, phi, dec):
return atan(sin(H), cos(H) * sin(phi) - tan(dec) * cos(phi))
def altitude(H, phi, dec):
return asin(sin(phi) * sin(dec) + cos(phi) * cos(dec) * cos(H))
def sidereal_time(d, lw):
return rad * (280.16 + 360.9856235 * d) - lw
def astro_refraction(h):
# the following formula works for positive altitudes only.
# if h = -0.08901179 a div/0 would occur.
h = np.maximum(h, 0)
# formula 16.4 of "Astronomical Algorithms" 2nd edition by <NAME>
# (Willmann-Bell, Richmond) 1998. 1.02 / tan(h + 10.26 / (h + 5.10)) h in
# degrees, result in arc minutes -> converted to rad:
return 0.0002967 / np.tan(h + 0.00312536 / (h + 0.08901179))
# general sun calculations
def solar_mean_anomaly(d):
return rad * (357.5291 + 0.98560028 * d)
def ecliptic_longitude(M):
# equation of center
C = rad * (1.9148 * sin(M) + 0.02 * sin(2 * M) + 0.0003 * sin(3 * M))
# perihelion of the Earth
P = rad * 102.9372
return M + C + P + PI
def sun_coords(d):
M = solar_mean_anomaly(d)
L = ecliptic_longitude(M)
return {'dec': declination(L, 0), 'ra': right_ascension(L, 0)}
# calculations for sun times
J0 = 0.0009
def julian_cycle(d, lw):
return np.round(d - J0 - lw / (2 * PI))
def approx_transit(Ht, lw, n):
return J0 + (Ht + lw) / (2 * PI) + n
def solar_transit_j(ds, M, L):
return J2000 + ds + 0.0053 * sin(M) - 0.0069 * sin(2 * L)
def hour_angle(h, phi, d):
return acos((sin(h) - sin(phi) * sin(d)) / (cos(phi) * cos(d)))
def observer_angle(height):
return -2.076 * np.sqrt(height) / 60
def get_set_j(h, lw, phi, dec, n, M, L):
"""Get set time for the given sun altitude
"""
w = hour_angle(h, phi, dec)
a = approx_transit(w, lw, n)
return solar_transit_j(a, M, L)
def get_position(date, lng, lat):
"""Calculate sun position for a given date and latitude/longitude
"""
lw = rad * -lng
phi = rad * lat
d = to_days(date)
c = sun_coords(d)
H = sidereal_time(d, lw) - c['ra']
return {
'azimuth': azimuth(H, phi, c['dec']),
'altitude': altitude(H, phi, c['dec'])}
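# --- Illustrative sketch (added) ---
# Hedged usage example of get_position with an arbitrary place and time; it
# returns the sun's azimuth and altitude in radians.
def _example_get_position():
    from datetime import timezone
    when = datetime(2020, 6, 21, 12, 0, 0, tzinfo=timezone.utc)
    return get_position(when, lng=0.0, lat=51.5)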
def get_times(
date,
lng,
lat,
height=0,
times: Iterable[Tuple[float, str, str]] = DEFAULT_TIMES):
"""Calculate sun times
Calculate sun times for a given date, latitude/longitude, and,
optionally, the observer height (in meters) relative to the horizon
"""
# If inputs are vectors (or some list-like type), then coerce them to
# numpy arrays
#
# When inputs are pandas series, then intermediate objects will also be
# pandas series, and you won't be able to do 2d broadcasting.
try:
len(date)
len(lat)
len(lng)
array_input = True
date = np.array(date)
lat = np.array(lat)