"""
This file is part of the MaternGaBO library.
Authors: <NAME> and <NAME>, 2021
License: MIT
Contact: <EMAIL>, <EMAIL>
"""
import numpy as np
from BoManifolds.Riemannian_utils.spd_utils import vector_to_symmetric_matrix_mandel
def min_eigenvalue_constraint(x_vec, min_eigenvalue):
"""
This function defines an inequality constraint on the minimum eigenvalue of an SPD matrix.
The value returned by the function is positive if the inequality constraint is satisfied.
Parameters
----------
:param x_vec: SPD matrix in Mandel vector form
:param min_eigenvalue: minimum eigenvalue to satisfy the constraint
Returns
-------
:return: difference between minimum eigenvalue of x and minimum tolerated eigenvalue
"""
x = vector_to_symmetric_matrix_mandel(x_vec)
eigenvalues = np.linalg.eigvals(x)
return np.min(eigenvalues) - min_eigenvalue
def max_eigenvalue_constraint(x_vec, max_eigenvalue):
"""
This function defines an inequality constraint on the maximum eigenvalue of an SPD matrix.
The value returned by the function is positive if the inequality constraint is satisfied.
Parameters
----------
:param x_vec: SPD matrix in Mandel vector form
:param max_eigenvalue: maximum eigenvalue to satisfy the constraint
Returns
-------
:return: difference between maximum tolerated eigenvalue and maximum eigenvalue of x
"""
x = vector_to_symmetric_matrix_mandel(x_vec)
eigenvalues = np.linalg.eigvals(x)
return max_eigenvalue - np.max(eigenvalues)
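# --- Illustrative usage (added sketch, not part of the original library) ---
# A minimal check of both constraints above, assuming a 2x2 SPD matrix in
# Mandel vectorization [x_11, x_22, sqrt(2) * x_12]; `_mandel_to_matrix_2x2`
# is a hypothetical stand-in for vector_to_symmetric_matrix_mandel in 2D.
def _mandel_to_matrix_2x2(x_vec):
    # Invert the 2x2 Mandel vectorization (off-diagonal stored times sqrt(2)).
    off = x_vec[2] / np.sqrt(2.0)
    return np.array([[x_vec[0], off], [off, x_vec[1]]])

if __name__ == "__main__":
    x_vec = np.array([2.0, 1.0, np.sqrt(2.0) * 0.5])  # matrix [[2, 0.5], [0.5, 1]]
    eigenvalues = np.linalg.eigvals(_mandel_to_matrix_2x2(x_vec))
    print(np.min(eigenvalues) - 0.1)  # > 0: min-eigenvalue constraint satisfied
    print(5.0 - np.max(eigenvalues))  # > 0: max-eigenvalue constraint satisfied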
import numpy as np
"""
basic implementation of a Recurrent Neural Network from scratch,
training a model that learns to add any pair of numbers given in binary array format
developer --> <NAME>
"""
class RecurrentNeuralNetwork:
def __init__(self,hidden_size=10):
"""hidden_size is number of neurons in hidden layer"""
self.hidden_size=hidden_size
self.activation={"sigmoid":(self.sigmoid,self.sig_grad),
"RELU":(self.RELU,self.RELU_grad),
"tanh":(self.tanh,self.tanh_grad)}
def fit(self,X,Y):
"""input your training dataset
X: input array 3D
Y: output arrray 3D
axis0- number of data data
axis1 -oredered steps(time steps) of data
axis2- input array for each step"""
#add a slot for threshold weight in each inputs
X=np.append(X,np.ones((X.shape[0],X.shape[1],1)),axis=2)
# store sizes of datasets
self.input_size=X.shape[2]
self.output_size=Y.shape[2]
self.X=X
self.Y=Y
def tanh(self,x):
"""for hyperbolic tangent activation"""
return np.tanh(x)
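# --- Illustrative data construction (added sketch, not part of the original file) ---
# A minimal example of the 3D arrays fit() expects, assuming binary addition
# with the least-significant bit as the first time step; the helper name
# `make_binary_addition_data` is ours, not from the original code.
def make_binary_addition_data(n_samples=100, n_bits=8, seed=0):
    rng = np.random.default_rng(seed)
    max_val = 2 ** (n_bits - 1)  # keep a + b representable in n_bits
    a = rng.integers(0, max_val, n_samples)
    b = rng.integers(0, max_val, n_samples)
    to_bits = lambda v: (v[:, None] >> np.arange(n_bits)) & 1  # LSB first
    X = np.stack([to_bits(a), to_bits(b)], axis=2)  # (n_samples, n_bits, 2)
    Y = to_bits(a + b)[:, :, None]                  # (n_samples, n_bits, 1)
    return X.astype(float), Y.astype(float)

# X, Y = make_binary_addition_data()
# RecurrentNeuralNetwork(hidden_size=16).fit(X, Y)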
from skfda import FDataGrid, FDataBasis
from skfda.datasets import fetch_weather
from skfda.misc.operators import LinearDifferentialOperator
from skfda.misc.regularization import TikhonovRegularization
from skfda.preprocessing.dim_reduction.projection import FPCA
from skfda.representation.basis import Fourier
import unittest
import numpy as np
class FPCATestCase(unittest.TestCase):
def test_basis_fpca_fit_attributes(self):
fpca = FPCA()
with self.assertRaises(AttributeError):
fpca.fit(None)
basis = Fourier(n_basis=1)
# check that if n_components is bigger than the number of samples then
# an exception should be thrown
fd = FDataBasis(basis, [[0.9]])
with self.assertRaises(AttributeError):
fpca.fit(fd)
# check that n_components must be smaller than the number of elements
# of target basis
fd = FDataBasis(basis, [[0.9], [0.7], [0.5]])
with self.assertRaises(AttributeError):
fpca.fit(fd)
def test_discretized_fpca_fit_attributes(self):
fpca = FPCA()
with self.assertRaises(AttributeError):
fpca.fit(None)
# check that if n_components is bigger than the number of samples then
# an exception should be thrown
fd = FDataGrid([[0.5], [0.1]], sample_points=[0])
with self.assertRaises(AttributeError):
fpca.fit(fd)
# check that n_components must be smaller than the number of attributes
# in the FDataGrid object
fd = FDataGrid([[0.9], [0.7], [0.5]], sample_points=[0])
with self.assertRaises(AttributeError):
fpca.fit(fd)
def test_basis_fpca_fit_result(self):
n_basis = 9
n_components = 3
fd_data = fetch_weather()['data'].coordinates[0]
fd_data = FDataGrid(np.squeeze(fd_data.data_matrix))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 7 13:06:21 2021
Core code obtained from <NAME>, Barnhart Lab
Class structure and relative imports from Ike Ogbonna, Barnhart Lab
@author: ike
"""
import io
from PIL import Image, ImageSequence
from glob import glob
import numpy as np
import os.path as op
import seaborn as sns
import scipy.ndimage as scn
import skimage.measure as skm
import matplotlib.pyplot as plt
from .pathutils import getPath, changeExt, makeParentDirectory
class MultipageTiff(object):
figures = list()
def __init__(self, imagePages):
if (type(imagePages) == str) and op.isdir(imagePages):
imagePages = glob(getPath(imagePages, "*", ext="tif"))
elif (type(imagePages) == list) and (len(imagePages) == 1):
imagePages = imagePages[0]
if type(imagePages) == str:
multipage = Image.open(imagePages)
self.imageArray = np.array(
[np.array(page) for page in ImageSequence.Iterator(multipage)],
dtype=np.uint8)
elif type(imagePages) == list:
self.imageArray = np.array(
[self.getImage(imageFile) for imageFile in sorted(imagePages)],
dtype=np.uint8)
else:
raise ValueError("Trying to make an empty image")
@staticmethod
def getImage(imageFile, mask=False):
image = (np.array(Image.open(imageFile)) > 0).astype(int)
image = (np.ma.masked_equal(image, 0) if mask else image)
return image
@staticmethod
def unit8Image(image, normalize=False):
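# Shift the image to non-negative values, then rescale into the uint8 range:
# to the full [0, 255] if normalize, otherwise capping the peak at min(max, 255).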
image = image + abs(min(0, np.min(image)))
oldMax = (np.max(image) if np.max(image) != 0 else 1)
newMax = (255 if normalize else min(np.max(image), 255))
image = np.rint((image / oldMax) * newMax).astype(np.uint8)
return image
@staticmethod
def savePillowArray(pilArray, saveFile):
saveFile = changeExt(saveFile, ext="tif")
makeParentDirectory(saveFile)
pilArray[0].save(
saveFile, compression="tiff_deflate",
save_all=True, append_images=pilArray[1:])
@classmethod
def saveImage(cls, image, save, normalize=False):
if image.dtype != "uint8" or normalize:
image = cls.unit8Image(image, normalize)
image = Image.fromarray(image, mode="L")
save = changeExt(save, ext="tif")
image.save(save, compression="tiff_deflate")
@classmethod
def addFigure(
cls, Y, name, yLabel, color="gray", axLabel=None, X=None, dY=None,
lightOn=None, bkg=None):
fig, ax = plt.subplots(figsize=(4, 3), dpi=300)
sns.set_theme(font_scale=0.5)
sns.set_style("ticks")
sns.despine()
X = (list(range(1, Y.shape[0] + 1)) if X is None else X)
if bkg is not None:
ax.plot(X, bkg, color="black", label="background")
if type(color) == list:
for x in range(len(color)):
ax.plot(X, Y[x], color=color[x], label=yLabel[x])
else:
ax.plot(X, Y, color=color, label=yLabel)
ax.set_title(name)
if axLabel is not None:
ax.set_xlabel(axLabel[0])
ax.set_ylabel(axLabel[1])
if dY is not None:
ax.fill_between(X, (Y - dY), (Y + dY), color=color, alpha=0.2)
if lightOn is not None:
plt.axvspan(
lightOn[0], lightOn[1], color="blue", lw=1, alpha=0.1)
ax.legend(loc="upper right")
ax.locator_params(axis="x", nbins=10)
ax.locator_params(axis="y", nbins=6)
ax.set_xlim(left=float(X[0]), right=float(X[-1]))
# ax.set_ylim(bottom=np.min(Y), top=np.max(Y))
# ax.spines['left'].set_position("zero")
# if bkg is not None:
# ax.spines['bottom'].set_position("zero")
buffer = io.BytesIO()
fig.savefig(buffer)
buffer.seek(0)
cls.figures += [Image.open(buffer)]
plt.close("all")
@classmethod
def saveFigures(cls, saveFile):
if len(cls.figures) != 0:
cls.savePillowArray(cls.figures, saveFile)
cls.figures = list()
def __len__(self):
return self.imageArray.shape[0]
def __add__(self, other):
if not isinstance(other, MultipageTiff):
return self
y = min(self.imageArray.shape[1], other.imageArray.shape[1])
x = min(self.imageArray.shape[2], other.imageArray.shape[2])
self.imageArray = np.concatenate(
(self.imageArray[:, :y, :x], other.imageArray[:, :y, :x]),
axis=0)
return self
def __radd__(self, other):
return self + other
def __getitem__(self, idx):
return self.imageArray[idx]
def __setitem__(self, idx, value):
self.imageArray[idx] = value
def __call__(self, other, mode):
if (not isinstance(other, MultipageTiff) or
(self.imageArray.shape[1:] != other.imageArray.shape[1:])):
pass
elif mode == "concatenate":
self.imageArray = np.concatenate(
(self.imageArray, other.imageArray), axis=0)
elif mode == "sum":
idx = min(self.imageArray.shape[0], other.imageArray.shape[0])
self.imageArray = self.imageArray[:idx] + other.imageArray[:idx]
return self
def averageProjection(self):
averageProjection = np.mean(self.imageArray, axis=0)
return averageProjection
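# --- Illustrative usage (added sketch) ---
# Assuming "stack_a.tif" and "stack_b.tif" are multipage TIFFs on disk
# (hypothetical paths): "+" crops both stacks to a common frame size and
# concatenates them along the page axis, as implemented in __add__ above.
# stack = MultipageTiff("stack_a.tif") + MultipageTiff("stack_b.tif")
# MultipageTiff.saveImage(stack.averageProjection(), "avg.tif", normalize=True)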
import logging
import math
import os
import shutil
import time
import cv2
import imageio
import numpy as np
import scipy.sparse
import tensorflow as tf
import utils
from lib import graph, mesh_renderer
from lib.mesh_io import write_obj
logger = logging.getLogger('x')
class BaseModel():
"""
Mesh Convolutional Autoencoder which uses the Chebyshev approximation.
"""
def __init__(self, args, sess, graphs, refer_mesh, image_paths, img_file):
self.sess = sess
self.graph = graphs
mesh_shape = list(refer_mesh['vertices'].shape)
self.gan = args.gan
self.wide = args.wide
self.root_dir = args.root_dir
self.img_file = img_file
self.stage = args.stage
if args.mode == 'test':
self.restore = True
else:
self.restore = args.restore
self.laplacians, self.downsamp_trans, self.upsamp_trans, self.pool_size = utils.init_sampling(
refer_mesh, os.path.join(args.root_dir, 'data', 'params', args.name), args.name)
logger.info("Transform Matrices and Graph Laplacians Generated.")
self.refer_meshes = utils.get_mesh_list(args.name)
self.bfm = utils.BFM_model(self.root_dir, 'data/bfm2009_face.mat')
# color = np.ones_like(refer_mesh['vertices'], dtype=np.uint8)
# color[self.bfm.skin_index] = 0
# write_obj('tests.obj', refer_mesh['vertices'], refer_mesh['faces'], color)
# write_obj('test.obj', refer_mesh['vertices'], refer_mesh['faces'], color)
self.buffer_size = args.buffer_size
self.workers = args.workers
self.num_filter = [16, 16, 16, 32]
self.c_k = 6
self.cam_z = 34
self.z_dim = args.nz
self.num_vert = mesh_shape[0]
self.vert_dim = 6
self.drop_rate = args.drop_rate
self.batch_size = args.batch_size
self.num_epochs = args.epoch
self.img_size = args.img_size
self.learning_rate = args.lr
self.adv_lambda = args.adv_lambda
if args.suffix is None:
self.dir_name = args.name
else:
self.dir_name = args.name + '_' + args.suffix
self.brelu = self.b1relu
self.pool = self.poolwT
self.unpool = self.poolwT
self.dilation_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
(5, 5)).astype(np.float32)[..., np.newaxis]
self.erosion_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
(9, 9)).astype(np.float32)[..., np.newaxis]
# lm_3d_idx = [
# int(x)
# for x in open('data/face_landmarks.txt', 'r').readlines()
# if len(x.strip()) > 1
# ]
# # self.lm_3d_idx = lm_3d_idx[8:9] + lm_3d_idx[17:]
# self.lm_3d_idx = lm_3d_idx[17:]
self.lm_3d_idx = self.bfm.landmark[17:]
train_image_paths, self.val_image_paths, self.test_image_paths = utils.make_paths(
image_paths, os.path.join(self.root_dir, 'data', 'params', args.name, 'image'),
self.root_dir)
self.train_image_paths = np.array(train_image_paths, dtype='object')
num_train = len(self.train_image_paths)
logger.info('Number of train data: %d', num_train)
self.num_batches = num_train // self.batch_size
if args.eval == 0:
self.eval_frequency = self.num_batches
elif args.eval < 1:
self.eval_frequency = int(self.num_batches * args.eval)
else:
self.eval_frequency = int(args.eval)
logger.info('Evaluation frequency: %d', self.eval_frequency)
self.vert_mean = np.reshape(self.bfm.shapeMU, [-1, 3])
self.decay_steps = num_train // args.batch_size
self.regularizers = []
self.regularization = 5e-4
self.ckpt_dir = os.path.join('checkpoints', self.dir_name)
self.summ_dir = os.path.join('summaries', self.dir_name)
self.samp_dir = os.path.join('samples', self.dir_name)
self.build_graph()
def build_graph(self):
"""Build the computational graph of the model."""
# self.graph = tf.Graph()
# with self.graph.as_default():
# Inputs.
with tf.name_scope('inputs'):
data_idxs = [x for x in range(len(self.train_image_paths))]
image_dataset = tf.data.Dataset.from_tensor_slices(data_idxs)
# image_dataset = image_dataset.map(
# lambda start_idx: tf.py_func(self.load_image_bin, [start_idx], [tf.float32, tf.float32]))
image_dataset = image_dataset.map(
lambda start_idx: tf.py_func(self.load_image_bin, [start_idx], tf.float32))
image_dataset = image_dataset.shuffle(buffer_size=self.buffer_size)
image_dataset = image_dataset.batch(self.batch_size)
image_dataset = image_dataset.repeat()
image_iterator = image_dataset.make_one_shot_iterator()
# self.train_rgbas, self.train_2dlms = image_iterator.get_next()
self.train_rgbas = image_iterator.get_next()
self.train_rgbas.set_shape([self.batch_size, self.img_size, self.img_size, 4])
self.train_images = (self.train_rgbas[..., :3] + 1) * 127.5
# self.train_2dlms.set_shape([self.batch_size, len(self.lm_3d_idx), 2])
self.refer_faces = [
tf.convert_to_tensor(x['faces'], dtype=tf.int32, name='refer_faces_{}'.format(i))
for i, x in enumerate(self.refer_meshes)
]
self.ph_rgbas = tf.placeholder(tf.float32, (self.batch_size, self.img_size, self.img_size, 4),
'input_rgbas')
self.input_images = (self.ph_rgbas[..., :3] + 1) * 127.5
# self.input_images = tf.floor((self.ph_rgbas[..., 2::-1] + 1) * 127.5)
self.ph_2dlms = tf.placeholder(tf.float32, (self.batch_size, len(self.lm_3d_idx), 2),
'input_2dlm')
self.ph_ren_lambda = tf.placeholder(tf.float32, (), 'render_lambda')
self.ph_ref_lambda = tf.placeholder(tf.float32, (), 'refine_lambda')
# self.ph_adv_lambda = tf.placeholder(tf.float32, (), 'adv_lambda')
with tf.gfile.GFile(os.path.join(self.root_dir, 'data/FaceReconModel.pb'), 'rb') as f:
face_rec_graph_def = tf.GraphDef()
face_rec_graph_def.ParseFromString(f.read())
def get_emb_coeff(net_name, inputs):
resized = tf.image.resize_images(inputs, [224, 224])
bgr_inputs = resized[..., ::-1]
tf.import_graph_def(face_rec_graph_def, name=net_name, input_map={'input_imgs:0': bgr_inputs})
image_emb = self.graph.get_tensor_by_name(net_name + '/resnet_v1_50/pool5:0')
image_emb = tf.squeeze(image_emb, axis=[1, 2])
coeff = self.graph.get_tensor_by_name(net_name + '/coeff:0')
return image_emb, coeff
image_emb, self.coeff = get_emb_coeff('facerec', self.train_images)
image_emb_test, self.coeff_test = get_emb_coeff('facerec_test', self.input_images)
with tf.gfile.GFile(os.path.join(self.root_dir, 'data/FaceNetModel.pb'), 'rb') as f:
face_net_graph_def = tf.GraphDef()
face_net_graph_def.ParseFromString(f.read())
def get_img_feat(net_name, inputs):
# inputs should be in [0, 255]
# facenet_input = tf.image.resize_image_with_crop_or_pad(inputs, 160, 160)
# TODO: fix resize issue!!!
facenet_input = tf.image.resize_images(inputs, [160, 160])
facenet_input = (facenet_input - 127.5) / 128.0
tf.import_graph_def(face_net_graph_def, name=net_name, input_map={
'input:0': facenet_input,
'phase_train:0': False
})
image_feat = self.graph.get_tensor_by_name(
net_name + '/InceptionResnetV1/Logits/AvgPool_1a_8x8/AvgPool:0')
image_feat = tf.squeeze(image_feat, axis=[1, 2])
return image_feat
image_feat = get_img_feat('facenet', self.train_images)
image_feat_test = get_img_feat('facenet_test', self.input_images)
self.image_emb = tf.concat([image_emb, image_feat], axis=-1)
self.image_emb_test = tf.concat([image_emb_test, image_feat_test], axis=-1)
pred_results = self.inference(self.train_rgbas, self.coeff, self.image_emb)
self.vert_pred = pred_results['vertice']
self.pca_text_pred = pred_results['pca_texture']
self.gcn_text_pred = pred_results['gcn_texture']
self.pca_color_pred = pred_results['pca_color']
self.gcn_color_pred = pred_results['gcn_color']
self.proj_color_pred = pred_results['proj_color']
self.pca_render_pred = pred_results['pca_render_color']
self.gcn_render_pred = pred_results['gcn_render_color']
self.lm_proj_pred = pred_results['lm_project']
# render_mask = self._erosion2d(self.train_rgbas[..., 3:])
render_mask = self.pca_render_pred[..., 3:] * self.train_rgbas[..., 3:]
gcn_render_image = (self.gcn_render_pred[..., :3] + 1) * 127.5
self.gcn_overlay = gcn_render_image[..., :3] * render_mask +\
self.train_images[..., :3] * (1 - render_mask)
gcn_image_feat = get_img_feat('facenet_gcn', self.gcn_overlay)
self.all_loss, self.pca_loss, self.gcn_loss, self.proj_loss, self.refine_loss, self.perc_loss, self.var_loss, self.sym_loss = self.compute_loss(
self.train_rgbas, self.pca_render_pred, self.gcn_render_pred, self.pca_text_pred,
self.gcn_text_pred, self.proj_color_pred, self.pca_color_pred, self.gcn_color_pred,
image_feat, gcn_image_feat, self.regularization)
test_results = self.inference(self.ph_rgbas, self.coeff_test, self.image_emb_test,
is_training=False, reuse=True, get_inter=True)
self.vert_test = test_results['vertice']
self.norm_test = test_results['normal']
self.pca_text_test = test_results['pca_texture']
self.gcn_text_test = test_results['gcn_texture']
self.pca_color_test = test_results['pca_color']
self.gcn_color_test = test_results['gcn_color']
self.proj_color_test = test_results['proj_color']
self.pca_ren_tex_test = test_results['pca_render_text']
self.gcn_ren_tex_test = test_results['gcn_render_text']
self.pca_ren_clr_test = test_results['pca_render_color']
self.gcn_ren_clr_test = test_results['gcn_render_color']
self.lm_proj_test = test_results['lm_project']
# render_mask_test = self._erosion2d(self.ph_rgbas[..., 3:])
render_mask_test = self.pca_ren_clr_test[..., 3:] * self.ph_rgbas[..., 3:]
gcn_ren_image_test = (self.gcn_ren_clr_test[..., :3] + 1) * 127.5
self.gcn_over_test = gcn_ren_image_test[..., :3] * render_mask_test +\
self.input_images[..., :3] * (1 - render_mask_test)
gcn_image_feat_test = get_img_feat('facenet_gcn_test', self.gcn_over_test)
self.test_all_loss, self.test_pca_loss, self.test_gcn_loss, self.test_proj_loss, self.test_refine_loss, self.test_perc_loss, _, _ = self.compute_loss(
self.ph_rgbas, self.pca_ren_clr_test, self.gcn_ren_clr_test, self.pca_text_test,
self.gcn_text_test, self.proj_color_test, self.pca_color_test, self.gcn_color_test,
image_feat_test, gcn_image_feat_test, self.regularization, True)
self.d_loss = None
if self.gan:
real_image = self.train_rgbas[..., :3]
fake_image = self.gcn_overlay / 127.5 - 1.0
self.g_loss, self.d_loss = self.compute_gan_loss(real_image, fake_image)
self.all_loss = self.all_loss + self.g_loss
real_img_test = self.ph_rgbas[..., :3]
fake_img_test = self.gcn_over_test / 127.5 - 1.0
self.test_g_loss, self.test_d_loss = self.compute_gan_loss(real_img_test, fake_img_test,
reuse=True)
self.test_all_loss = self.test_all_loss + self.test_g_loss
self.gen_train, self.dis_train = self.training(self.all_loss, self.d_loss)
# self.op_encoder = self.encoder(self.ph_data, reuse=True)
# self.op_decoder = self.decoder(self.ph_z, reuse=True)
# Initialize variables, i.e. weights and biases.
self.op_init = tf.global_variables_initializer()
# Summaries for TensorBoard and Save for model parameters.
self.op_summary = tf.summary.merge_all()
var_all = tf.global_variables()
trainable_vars = tf.trainable_variables()
bn_vars = [x for x in var_all if 'BatchNorm/moving' in x.name]
global_vars = [x for x in var_all if 'training' in x.name]
vars_to_save = trainable_vars + bn_vars + global_vars
self.op_saver = tf.train.Saver(var_list=vars_to_save, max_to_keep=3)
logger.info('Successfully Built Graph')
def inference(self, images, coeff, image_emb, is_training=True, reuse=False, get_inter=False):
shape_coef, exp_coef, color_coef, angles, gamma, translation = utils.split_bfm09_coeff(coeff)
# shapeMU = tf.constant(self.bfm.shapeMU, dtype=tf.float32)
shapePC = tf.constant(self.bfm.shapePC, dtype=tf.float32)
# expMU = tf.constant(self.bfm.expressionMU, dtype=tf.float32)
expPC = tf.constant(self.bfm.expressionPC, dtype=tf.float32)
colorMU = tf.constant(self.bfm.colorMU, dtype=tf.float32)
colorPC = tf.constant(self.bfm.colorPC, dtype=tf.float32)
vert_offset = tf.einsum('ij,aj->ai', shapePC, shape_coef) + tf.einsum(
'ij,aj->ai', expPC, exp_coef)
vertice = tf.reshape(vert_offset, [self.batch_size, self.num_vert, 3]) + self.vert_mean
vertice = vertice - tf.reduce_mean(self.vert_mean, axis=0, keepdims=True)
# normal = tf.nn.l2_normalize(vertice)
normal = self.compute_norm(vertice)
rotation = utils.rotation_matrix_tf(angles)
vert_trans = tf.matmul(vertice, rotation) + tf.reshape(translation, [self.batch_size, 1, 3])
normal_rot = tf.matmul(normal, rotation)
pca_texture = tf.einsum('ij,aj->ai', colorPC, color_coef) + colorMU
# the PCA outputs are in [0, 255]
pca_texture = tf.clip_by_value(pca_texture, 0.0, 255.0)
pca_texture = pca_texture / 127.5 - 1
pca_texture = tf.reshape(pca_texture, [self.batch_size, self.num_vert, 3])
# the outputs of mesh_decoder use tanh activation
with tf.variable_scope('render', reuse=reuse):
camera_position = tf.constant([0, 0, 10], dtype=tf.float32)
camera_lookat = tf.constant([0, 0, 0], dtype=tf.float32)
camera_up = tf.constant([0, 1, 0], dtype=tf.float32)
light_positions = tf.tile(tf.reshape(tf.constant([0, 0, 0], dtype=tf.float32), [1, 1, 3]),
[self.batch_size, 1, 1])
light_intensities = tf.tile(tf.reshape(tf.constant([0, 0, 0], dtype=tf.float32), [1, 1, 3]),
[self.batch_size, 1, 1])
fov_y = 12.5936
ambient_color = tf.tile(tf.reshape(tf.constant([1, 1, 1], dtype=tf.float32), [1, 3]),
[self.batch_size, 1])
def postprocess(inputs):
outputs = tf.clip_by_value(inputs, 0.0, 1.0)
outputs = outputs * [[[[2.0, 2.0, 2.0, 1.0]]]] - [[[[1.0, 1.0, 1.0, 0.0]]]]
return outputs
# colors must be in [0, 1] before rendering; the renderer outputs are
# post-processed so that rgb values end up in [-1, 1]
def neural_renderer(vertices, triangles, normals, diffuse_colors):
renders, shift_vert = mesh_renderer.mesh_renderer(
vertices=vertices, triangles=triangles, normals=normals, diffuse_colors=diffuse_colors,
camera_position=camera_position, camera_lookat=camera_lookat, camera_up=camera_up,
light_positions=light_positions, light_intensities=light_intensities,
image_width=self.img_size, image_height=self.img_size, fov_y=fov_y,
ambient_color=ambient_color)
return postprocess(renders), shift_vert
pca_render_text, shift_vert = neural_renderer(vertices=vert_trans,
triangles=self.refer_faces[0],
normals=normal_rot,
diffuse_colors=(pca_texture + 1) / 2)
pca_color = self.illumination((pca_texture + 1) / 2, normal_rot, gamma)
pca_render_color, _ = neural_renderer(vertices=vert_trans, triangles=self.refer_faces[0],
normals=normal_rot, diffuse_colors=pca_color)
pca_color = pca_color * 2 - 1
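# Perspective projection: fov_y is in degrees, so tan(fov_y / 360 * pi) is the
# tangent of the half field of view; x and y are divided by it and by depth to
# obtain normalized image-plane coordinates used for the landmark projection.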
facial = tf.tan(fov_y / 360.0 * math.pi)
facial = tf.reshape(facial, [-1, 1, 1])
proj_vert = shift_vert[..., :3] * [[[1, -1, -1]]]
proj_vert = proj_vert[..., :2] / facial / proj_vert[..., 2:3]
eros_mask = self._erosion2d(images[..., 3:])
eros_image = tf.concat([images[..., :3], eros_mask], axis=-1)
lm_project = tf.gather(proj_vert, self.lm_3d_idx, axis=1)
proj_color = self.project_color(proj_vert, eros_image)
# a vertex is visible when its rotated normal faces the camera (positive z)
visible = tf.cast(normal_rot[..., 2:3] > 0, tf.float32) * proj_color[..., 3:4]
proj_color = tf.concat([proj_color[..., :3] * visible, visible], axis=-1)
# TODO:
# refine_input = pca_texture
# refine_input = tf.concat([pca_texture, proj_color[..., :3]], axis=-1)
refine_input = tf.concat([pca_texture, proj_color], axis=-1)
gcn_texture = self.mesh_generator(image_emb, refine_input, reuse=reuse)
with tf.variable_scope('render', reuse=reuse):
gcn_render_text, _ = neural_renderer(vertices=vert_trans, triangles=self.refer_faces[0],
normals=normal_rot, diffuse_colors=(gcn_texture + 1) / 2)
gcn_color = self.illumination((gcn_texture + 1) / 2, normal_rot, gamma)
gcn_render_color, _ = neural_renderer(vertices=vert_trans, triangles=self.refer_faces[0],
normals=normal_rot, diffuse_colors=gcn_color)
gcn_color = gcn_color * 2 - 1
tf.summary.image('pca_render_text', pca_render_text, max_outputs=4)
tf.summary.image('gcn_render_text', gcn_render_text, max_outputs=4)
tf.summary.image('pca_render_color', pca_render_color, max_outputs=4)
tf.summary.image('gcn_render_color', gcn_render_color, max_outputs=4)
logger.info('Successfully Built Inference')
return {
# 'vertice': vert_trans,
'vertice': vertice,
'normal': normal,
'pca_texture': pca_texture, # [-1, 1]
'gcn_texture': gcn_texture, # [-1, 1]
'pca_color': pca_color, # [-1, 1]
'gcn_color': gcn_color, # [-1, 1]
'proj_color': proj_color, # [-1, 1]
'pca_render_text': pca_render_text, # [-1, 1]
'gcn_render_text': gcn_render_text, # [-1, 1]
'pca_render_color': pca_render_color, # [-1, 1]
'gcn_render_color': gcn_render_color, # [-1, 1]
'lm_project': lm_project
}
def compute_loss(self, input_image, pca_render, gcn_render, pca_texture, gcn_texture, proj_color,
pca_color, gcn_color, input_feat, gcn_feat, regularization, get_inter=False):
"""Adds to the inference model the layers required to generate loss."""
with tf.name_scope('loss'):
with tf.name_scope('data_loss'):
skin_mask = self._erosion2d(input_image[..., 3:])
gcn_render_mask = tf.round(gcn_render[..., 3:]) * skin_mask
# pca_render_loss = tf.losses.mean_squared_error(
pca_render_loss = tf.losses.absolute_difference(
predictions=pca_render[..., :3] * gcn_render_mask, labels=input_image[..., :3] *
gcn_render_mask, reduction=tf.losses.Reduction.SUM) / tf.reduce_sum(gcn_render_mask)
# gcn_render_loss = tf.losses.mean_squared_error(
gcn_render_loss = tf.losses.absolute_difference(
predictions=gcn_render[..., :3] * gcn_render_mask, labels=input_image[..., :3] *
gcn_render_mask, reduction=tf.losses.Reduction.SUM) / tf.reduce_sum(gcn_render_mask)
# project_loss_image = tf.losses.mean_squared_error(
project_loss_image = tf.losses.absolute_difference(
predictions=gcn_color * proj_color[..., 3:],
labels=proj_color[..., :3] * proj_color[..., 3:], reduction=tf.losses.Reduction.MEAN)
# project_loss_pca = tf.losses.mean_squared_error(
project_loss_pca = tf.losses.absolute_difference(
predictions=gcn_color * (1 - proj_color[..., 3:]),
labels=pca_color * (1 - proj_color[..., 3:]), reduction=tf.losses.Reduction.MEAN)
project_loss = project_loss_image + 0.3 * project_loss_pca
# refine_loss = tf.losses.mean_squared_error(
refine_loss = tf.losses.absolute_difference(predictions=gcn_texture, labels=pca_texture,
reduction=tf.losses.Reduction.MEAN)
perception_loss = 1 - tf.reduce_mean(utils.cosine(input_feat, gcn_feat))
var_losses = []
gcn_skin_texture = tf.gather(gcn_texture, self.bfm.skin_index, axis=1)
for i in range(3):
_, variance = tf.nn.moments(gcn_skin_texture[..., i], axes=1)
var_losses.append(variance)
var_loss = tf.reduce_mean(var_losses)
sym_diff = tf.gather(gcn_texture, self.bfm.left_index, axis=1) - tf.gather(
gcn_texture, self.bfm.right_index, axis=1)
sym_loss = tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square(sym_diff) + 1e-16, axis=-1)))
# adj_tensor = tf.constant(self.adjacent.reshape(
# [1, self.num_vert, self.num_vert, 1]),
# dtype=tf.int32,
# shape=[1, self.num_vert, self.num_vert, 1])
# coo = self.adjacent.tocoo()
# indices = np.mat([0, self.adjacent.row, self.adjacent.col, 0]).transpose()
# values = np.ones_like(self.adjacent.data, np.float32)
# adj_tensor = tf.SparseTensor(indices, values, self.adjacent.shape)
# # adj_tensor = tf.SparseTensor(self.adjacent.indices,
# # np.clip(self.adjacent.data, 0, 1),
# # self.adjacent.shape)
# expand = tf.ones([1, self.num_vert, self.num_vert, 3], dtype=tf.float32)
# expand = expand * tf.expand_dims(gcn_texture, axis=1)
# exp_trans = tf.transpose(expand, [0, 2, 1, 3])
# # vertical = tf.ones([self.num_vert, self.num_vert, 3], dtype=tf.float32)
# # vertical = vertical * tf.expand_dims(gcn_texture, axis=2)
# smooth_loss = tf.abs((expand - exp_trans) * adj_tensor)
# test = tf.sparse_to_dense(smooth_loss.indices, )
#TODO: need attention
# data_loss = self.ph_ref_lambda * refine_loss + self.ph_ren_lambda * (
# gcn_render_loss + 0.2 * project_loss +
# 0.2 * perception_loss) + 0.1 * sym_loss
data_loss = self.ph_ref_lambda * refine_loss + self.ph_ren_lambda * (
project_loss + 0.2 * perception_loss + 0.5 * sym_loss + 0.01 * var_loss)
# if not get_inter:
# self.skin_mask = skin_mask
# self.gcn_render_mask = gcn_render_mask
# self.gcn_render_image = gcn_render[..., :3]
# self.input_image_rgb = input_image[..., :3]
# self.pca_render_image = pca_render[..., :3]
with tf.name_scope('regularization'):
regularization *= tf.add_n(self.regularizers)
loss = data_loss + regularization
tf.summary.scalar('loss/data_loss', data_loss)
tf.summary.scalar('loss/pca_render_loss', pca_render_loss)
tf.summary.scalar('loss/gcn_render_loss', gcn_render_loss)
tf.summary.scalar('loss/project_loss', project_loss)
tf.summary.scalar('loss/refine_loss', refine_loss)
tf.summary.scalar('loss/perception_loss', perception_loss)
tf.summary.scalar('loss/var_loss', var_loss)
tf.summary.scalar('loss/sym_loss', sym_loss)
tf.summary.scalar('loss/regularization', regularization)
logger.info('Successfully Computed Losses')
return loss, pca_render_loss, gcn_render_loss, project_loss, refine_loss, perception_loss, var_loss, sym_loss
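# The adversarial loss below follows the WGAN-GP formulation: a Wasserstein
# critic loss plus a gradient penalty of weight `scale`, computed on images
# interpolated between real and fake samples.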
def compute_gan_loss(self, real_image, fake_image, reuse=False, scale=10.0):
t = not reuse
real_score = self.image_disc(real_image, t, reuse=reuse)
fake_score = self.image_disc(fake_image, t, reuse=True)
epsilon = tf.random_uniform([], 0.0, 1.0)
hat_image = epsilon * real_image + (1 - epsilon) * fake_image
hat_score = self.image_disc(hat_image, t, reuse=True)
hat_gradient = tf.gradients(hat_score, hat_image)[0]
hat_gradient = tf.sqrt(tf.reduce_sum(tf.square(hat_gradient), axis=[1, 2, 3]))
hat_gradient = tf.reduce_mean(tf.square(hat_gradient - 1.0) * scale)
g_loss = -self.adv_lambda * tf.reduce_mean(fake_score)
d_loss = self.adv_lambda * (tf.reduce_mean(fake_score) - tf.reduce_mean(real_score) +
hat_gradient)
logger.info('Successfully Computed GAN Losses')
return g_loss, d_loss
def training(self, g_loss, d_loss=None, decay_rate=0.98):
"""Adds to the loss model the Ops required to generate and apply gradients."""
with tf.name_scope('training'):
# Learning rate.
global_step = tf.Variable(0, name='global_step', trainable=False)
if decay_rate != 1:
learning_rate = tf.train.exponential_decay(self.learning_rate, global_step,
self.decay_steps, decay_rate, staircase=True)
else:
learning_rate = self.learning_rate
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
check_grads = []
def check_gradients(grads):
for i, (grad, var) in enumerate(grads):
if grad is None:
logger.info('warning: %s has no gradient', var.op.name)
else:
grads[i] = (tf.clip_by_norm(grad, 5), var)
check_grads.append(tf.check_numerics(grad, "error occurred"))
all_vars = tf.trainable_variables()
mesh_gen_vars = [x for x in all_vars if x.name.startswith('mesh_generator')]
g_grads = optimizer.compute_gradients(g_loss, var_list=mesh_gen_vars)
check_gradients(g_grads)
if d_loss is not None:
image_dis_vars = [x for x in all_vars if x.name.startswith('image_disc')]
d_grads = optimizer.compute_gradients(d_loss, var_list=image_dis_vars)
check_gradients(d_grads)
with tf.control_dependencies(check_grads):
op_g_grad = optimizer.apply_gradients(g_grads, global_step=global_step)
if d_loss is not None:
op_d_grad = optimizer.apply_gradients(d_grads, global_step=global_step)
# The op return the learning rate.
update_bn_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies([op_g_grad] + update_bn_ops):
gen_train = tf.identity(learning_rate, name='control')
dis_train = None
if d_loss is not None:
with tf.control_dependencies([op_d_grad] + update_bn_ops):
dis_train = tf.identity(learning_rate, name='control')
logger.info('Successfully Built Training Optimizer')
return gen_train, dis_train
def fit(self):
for d in [self.ckpt_dir, self.summ_dir, self.samp_dir]:
if not os.path.isdir(d):
os.makedirs(d)
logger.info('Start Fitting Model')
t_process, t_wall = time.clock(), time.time()
shutil.rmtree(self.summ_dir, ignore_errors=True)
writer = tf.summary.FileWriter(self.summ_dir)
# shutil.rmtree(self.ckpt_dir, ignore_errors=True)
if not os.path.isdir(self.ckpt_dir):
os.makedirs(self.ckpt_dir)
path = os.path.join(self.ckpt_dir, 'model')
if not os.path.isdir(self.samp_dir):
os.makedirs(self.samp_dir)
self.sess.run(self.op_init)
if self.restore:
self._restore_ckpt()
self.restore = False
val_image = utils.load_images(self.val_image_paths, self.img_size, alpha=True, landmark=False)
step = 0
for epoch in range(self.num_epochs):
ren_lambda = np.clip(0.2 * epoch, 0, 1).astype(np.float32)
ref_lambda = np.clip(1 - ren_lambda, 0.2, 1).astype(np.float32)
logger.info('render_lambda: %f, refine_lambda: %f', ren_lambda, ref_lambda)
feed_dict = {self.ph_ren_lambda: ren_lambda, self.ph_ref_lambda: ref_lambda}
fetches = [
self.gen_train, self.all_loss, self.pca_loss, self.gcn_loss, self.proj_loss,
self.refine_loss, self.perc_loss, self.var_loss, self.sym_loss
]
if self.gan:
dis_fetches = fetches + [self.g_loss, self.d_loss]
for batch in range(self.num_batches):
try:
train_dis = self.gan and ren_lambda > 1e-5
# train_dis = True
if train_dis:
for _ in range(5):
_ = self.sess.run(self.dis_train, feed_dict=feed_dict)
_, all_loss, pca_loss, gcn_loss, proj_loss, refine_loss, perc_loss, var_loss, sym_loss, g_loss, d_loss = self.sess.run(
dis_fetches, feed_dict=feed_dict)
else:
_, all_loss, pca_loss, gcn_loss, proj_loss, refine_loss, perc_loss, var_loss, sym_loss = self.sess.run(
fetches, feed_dict=feed_dict)
if batch % 10 == 0:
log_str = ' all_loss: {:.3e}, pca_loss: {:.3e}, gcn_loss: {:.3e}, proj_loss: {:.3e}, refine_loss: {:.3e}, perc_loss: {:.3e}, var_loss: {:.3e}, sym_loss: {:.3e}'.format(
all_loss, pca_loss, gcn_loss, proj_loss, refine_loss, perc_loss, var_loss, sym_loss)
if train_dis:
log_str += ', g_loss: {:.3e}, d_loss: {:.3e}'.format(g_loss, d_loss)
logger.info('batch {} / {} (epoch {} / {}):'.format(batch, self.num_batches, epoch,
self.num_epochs))
logger.info(log_str)
except Exception as e:
logger.info('Error Occurred in Sess Run.')
logger.debug(e)
# Periodical evaluation of the model.
if batch % self.eval_frequency == 0:
string, results = self.evaluate(val_image)
logger.info(' validation {}'.format(string))
logger.info(' time: {:.0f}s (wall {:.0f}s)'.format(time.clock() - t_process,
time.time() - t_wall))
self.save_sample(results, step, val_image, idx=0)
# Summaries for TensorBoard.
summary = tf.Summary(
value=[tf.Summary.Value(tag='validation/loss', simple_value=results['all_loss'])])
writer.add_summary(summary, step)
# Save model parameters (for evaluation).
self.op_saver.save(self.sess, path, global_step=step)
step += 1
writer.close()
def save_sample(self, results, step, val_image, val_landmark=None, sample_dir=None, idx=0,
only_skin=False):
if sample_dir is None:
sample_dir = self.samp_dir
input_image = utils.img_denormalize(val_image[idx])
vertice = results['vertices'][idx]
normal = results['normals'][idx]
pca_texture = utils.img_denormalize(results['pca_texts'][idx])
gcn_texture = utils.img_denormalize(results['gcn_texts'][idx])
pca_color = utils.img_denormalize(results['pca_colors'][idx])
gcn_color = utils.img_denormalize(results['gcn_colors'][idx])
proj_color = utils.img_denormalize(results['proj_color'][idx])
pca_ren_tex = utils.img_denormalize(results['pca_ren_texs'][idx])
gcn_ren_tex = utils.img_denormalize(results['gcn_ren_texs'][idx])
pca_ren_clr = utils.img_denormalize(results['pca_ren_clrs'][idx])
gcn_ren_clr = utils.img_denormalize(results['gcn_ren_clrs'][idx])
lm_proj = results['lm_projs'][idx]
# input_image = np.clip(
# input_image.astype(np.int32) + [[[0, 0, 0, 64]]], 0,
# 255).astype(np.uint8)
imageio.imsave(os.path.join(sample_dir, '{}_input.png'.format(step)), input_image[..., :3])
# imageio.imsave(os.path.join(sample_dir, '{}_mask.png'.format(step)),
# input_image[..., 3])
if val_landmark is None:
lm_image = input_image[..., :3]
else:
lm_image = utils.draw_image_with_lm(None, input_image[..., :3], val_landmark[idx],
self.img_size, (0, 0, 255))
utils.draw_image_with_lm(os.path.join(sample_dir, '{}_lm_proj.png'.format(step)), lm_image,
lm_proj, self.img_size)
render_mask = pca_ren_clr[:, :, 3:] // 255
if only_skin:
render_mask = render_mask * (input_image[..., 3:] // 255)
# render_mask = cv2.erode(render_mask, np.ones((5, 5), dtype=np.uint8), iterations=5)
imageio.imsave(os.path.join(sample_dir, '{}_mask.png'.format(step)), render_mask * 255)
def save_render(inputs, name, draw_lm=False):
image = inputs[:, :, :3] * render_mask + input_image[:, :, :3] * (1 - render_mask)
if draw_lm:
utils.draw_image_with_lm(os.path.join(sample_dir, name), image, lm_proj, self.img_size)
else:
imageio.imsave(os.path.join(sample_dir, name), image)
# imageio.imsave(os.path.join(sample_dir, '{}_gcn.png'.format(step)), gcn_ren_clr)
save_render(pca_ren_tex, '{}_pca_ren_tex.png'.format(step))
save_render(gcn_ren_tex, '{}_gcn_ren_tex.png'.format(step))
save_render(pca_ren_clr, '{}_pca_ren_clr.png'.format(step))
save_render(gcn_ren_clr, '{}_gcn_ren_clr.png'.format(step))
write_obj(os.path.join(sample_dir, '{}_pca_texture.obj'.format(step)), vertice,
self.refer_meshes[0]['faces'], pca_texture, normal)
write_obj(os.path.join(sample_dir, '{}_gcn_texture.obj'.format(step)), vertice,
self.refer_meshes[0]['faces'], gcn_texture, normal)
write_obj(os.path.join(sample_dir, '{}_pca_color.obj'.format(step)), vertice,
self.refer_meshes[0]['faces'], pca_color, normal)
write_obj(os.path.join(sample_dir, '{}_gcn_color.obj'.format(step)), vertice,
self.refer_meshes[0]['faces'], gcn_color, normal)
write_obj(os.path.join(sample_dir, '{}_proj_color.obj'.format(step)), vertice,
self.refer_meshes[0]['faces'], proj_color, normal)
logger.info('Sample %s saved!', step)
def evaluate(self, images):
# t_process, t_wall = time.clock(), time.time()
size = images.shape[0]
result_list = []
for begin in range(0, size, self.batch_size):
end = begin + self.batch_size
end = min([end, size])
batch_image = np.zeros((self.batch_size, images.shape[1], images.shape[2], images.shape[3]))
tmp_image = images[begin:end]
batch_image[:end - begin] = tmp_image
# batch_landmark = None
# if landmarks is not None:
# batch_landmark = np.zeros((self.batch_size, len(self.lm_3d_idx), 2))
# tmp_landmark = landmarks[begin:end]
# batch_landmark[:end - begin] = tmp_landmark
result = self.predict(batch_image)
result_list.append(result)
results = {
'vertices': np.concatenate([x['vertice'] for x in result_list]),
'normals': np.concatenate([x['normal'] for x in result_list]),
'pca_texts': np.concatenate([x['pca_text'] for x in result_list]),
'gcn_texts': np.concatenate([x['gcn_text'] for x in result_list]),
import numpy as np
from gurobipy import *
import time
import os
import sys
from Utilities import CalcRhoAndBetaVectors
from UtilitiesOptimization import CalculateLPGurobi, CalculateQuadGurobi,\
SubgrAlgSavPrimDualObjInd, SubgrAlgSavPrimDualObjFn_L2Ind, ExtendSizeCamps, OptimalBids, OptimalX
#
#
## Simulation Code
## To make the simulation code faster and easier to read (in particular the greedy heuristic),
## we reshape the vector of click-through rates 'vector_ctr', the vector of
## revenues 'vector_q', the vector of revenue times click-through rate 'vector_qctr', and others
## into matrices of size (number of campaigns) x (number of impressions).
## That's done in the following function.
def CreateMatR_ctr_Rctr_Rhoctr(numCampaigns, num_impressions, num_edges, \
index_Imps, index_sizeCamps, vector_q, vector_ctr, \
vector_qctr, PPFTable, numericBeta):
## mat_r_by_Imp is a matrix in which each column 'i'
## represents the valuations of all campaigns in order for
## an impression of type 'i'. If the campaign is not interested
## in the impression a zero value is entered in that position.
mat_r_by_Imp=np.zeros((numCampaigns, num_impressions))
mat_ctr=np.zeros((numCampaigns, num_impressions))
mat_rctr_by_Imp=np.zeros((numCampaigns, num_impressions))
mat_rhoctr_by_Imp=np.zeros((numCampaigns, num_impressions))
mat_rctrBetarho_by_Imp=np.zeros((numCampaigns, num_impressions))
[rho_rctr, beta_rctr]=CalcRhoAndBetaVectors(vector_qctr, num_edges, \
index_Imps, PPFTable, numericBeta)
for i in range(num_impressions):
count=0
aux=0
indexes=np.arange(num_edges)[(index_Imps==i)]
sizeIndexes=len(indexes)
if(sizeIndexes!=0):
pos=indexes[aux]
for j in range(numCampaigns):
impInCamp=index_sizeCamps[j]
if (pos<(count+impInCamp)):
mat_r_by_Imp[j, i]=vector_q[pos]
mat_ctr[j, i]=vector_ctr[pos]
mat_rctr_by_Imp[j, i]=vector_qctr[pos]
mat_rhoctr_by_Imp[j, i]=rho_rctr[pos]
mat_rctrBetarho_by_Imp[j, i] =(vector_qctr[pos]-\
beta_rctr[pos])*rho_rctr[pos]
if(aux<sizeIndexes-1):
aux+=1
pos=indexes[aux]
else:
# No more campaigns use that impression
pos=num_edges
count+=impInCamp
return [mat_r_by_Imp, mat_ctr, mat_rctr_by_Imp, mat_rhoctr_by_Imp, \
mat_rctrBetarho_by_Imp]
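# --- Illustrative layout (added example) ---
# With 2 campaigns and 2 impression types, where campaign 0 targets impression 0
# and campaign 1 targets both, the edge-ordered vectors have 3 entries:
# index_Imps = [0, 0, 1], index_sizeCamps = [1, 2], and vector_q = [q0, q1, q2]
# unfolds into mat_r_by_Imp = [[q0, 0], [q1, q2]].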
# ### Greedy Heuristic Procedure
# When the greedy heuristic has the opportunity to bid for a given impression type,
# it first checks the budgets to see which of the interested campaigns have enough
# money to pay in case a click occurs, and then it bids on behalf of the campaign
# that maximizes the profit. Given that Ipinyou assumes second-price auctions,
# the greedy heuristic bids for the campaign with the highest revenue times ctr
# that still has enough money to pay for the impression in case of winning.
# 'CreateMatrixBidAndX' transforms bid and allocation vectors into matrices. This code
# will be used by all methods in the simulation step, as we obtain bidding and
# allocation vectors for Indicator and Indicator + $\ell_2$ once we run our primal-dual
# methodology, and the greedy step has bidding prices equal to $r_{ik}$. Given that we run
# our primal-dual methodology only once per simulation (which is clearly sub-optimal), the
# allocation vector is enough to decide on behalf of which campaign to bid
# for a whole simulation.
def CreateMatrixBidAndX(numCampaigns, num_impressions, num_edges, \
index_Imps, index_sizeCamps, bid_vector, x):
mat_bid_by_Imp=np.zeros((numCampaigns, num_impressions))
mat_x_by_Imp=np.zeros((numCampaigns, num_impressions))
for i in range(num_impressions):
count=0
aux=0
indexes=np.arange(num_edges)[(index_Imps==i)]
sizeIndexes=len(indexes)
if(sizeIndexes!=0):
pos=indexes[aux]
for j in range(numCampaigns):
impInCamp=index_sizeCamps[j]
if (pos<(count+impInCamp)):
mat_bid_by_Imp[j, i]=bid_vector[pos]
mat_x_by_Imp[j, i]=x[pos]
if(aux<sizeIndexes-1):
aux+=1
pos=indexes[aux]
else:
# No more campaigns use that impression
# This should be done with a while.
pos=num_edges
count+=impInCamp
return [mat_bid_by_Imp, mat_x_by_Imp]
# For each impression type $i$, the probability of not bidding for it is
# $$1-\sum_{k \in \mathcal{K}_i} x_{ik}.$$ CreateProbOfBidding returns the complementary
# probability of bidding, $\sum_{k \in \mathcal{K}_i} x_{ik}$, for each impression type.
# In case we decide to bid for a given impression type, FastRandomChoice helps us decide
# on behalf of which campaign to bid. It receives as inputs the vector 'condProbVector',
# which represents the probabilities of bidding on behalf of the campaigns that have
# enough budget to bid (its entries are non-negative and sum up to one), and a number
# 'unif_value', assumed to be sampled from a uniform random variable. It then uses the
# standard cumulative-sum trick to pick a campaign. The campaign index it returns is
# relative to the campaigns that have enough budget to bid.
def CreateProbOfBidding(mat_x_by_Imp):
return np.sum(mat_x_by_Imp, axis=0)
def FastRandomChoice(condProbVector, unif_value):
# Return the first index whose cumulative probability reaches unif_value
# (this mirrors the inline cumulative-sum loops used in the simulations below).
auxPartSum=0.0
for i in range(len(condProbVector)):
auxPartSum+=condProbVector[i]
if unif_value<=auxPartSum:
return i
return len(condProbVector)-1
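# --- Illustrative check (added example) ---
# With condProbVector = [0.2, 0.5, 0.3] and unif_value = 0.65, the running
# cumulative sums are 0.2, 0.7, 1.0; the first one to reach 0.65 is 0.7,
# so FastRandomChoice returns index 1.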
### Initializing data for each method (Greedy and derived from our method)
# When we run a simulation we would like to save, per campaign, the number of bids made,
# auctions won, and clicks obtained for each impression type. That info is saved in cartBids, cartWon,
# and cartClicked resp. Also, as general statistics, we would like to know the cost, revenue,
# and profit each impression type brought for the DSP. That info is saved in costBids, revenue,
# and profit resp.
# Function CreateDataForSimulation creates all the data needed to start the simulation for the
# Greedy and a non-Greedy method.
def CreateIndicatorSimulation(numCampaigns, num_impressions, vector_m):
budget=np.zeros(numCampaigns)
budget[:]=vector_m
cartBids=np.zeros((numCampaigns, num_impressions))
cartWon=np.zeros((numCampaigns, num_impressions))
cartClicked=np.zeros((numCampaigns, num_impressions))
costBids=np.zeros(num_impressions)
revenue=np.zeros(num_impressions)
profit=np.zeros(num_impressions)
return [budget, cartBids, cartWon, cartClicked, \
costBids, revenue, profit]
def CreateDataForSimulation(bidFound, xFound, numCampaigns, \
num_impressions, num_edges, index_Imps, index_sizeCamps, vector_q, \
vector_ctr, vector_qctr, vector_m, PPFTable, numericBeta):
[budgetLR, cartBidsLR, cartWonLR, cartClickedLR, costBidsLR, revenueLR, \
profitLR]=CreateIndicatorSimulation(numCampaigns, \
num_impressions, vector_m)
[budgetGr, cartBidsGr, cartWonGr, cartClickedGr, costBidsGr, revenueGr, \
profitGr]=CreateIndicatorSimulation(numCampaigns, \
num_impressions, vector_m)
[mat_r_by_Imp, mat_ctrTest, mat_rctr_by_Imp, mat_rhoctr_by_Imp, mat_rctrBetarho_by_Imp]=\
CreateMatR_ctr_Rctr_Rhoctr(numCampaigns, num_impressions, num_edges, \
index_Imps, index_sizeCamps, vector_q, vector_ctr, vector_qctr, PPFTable, numericBeta)
[mat_bid_by_ImpLR, mat_x_by_ImpLR]=CreateMatrixBidAndX(numCampaigns, \
num_impressions, num_edges, index_Imps, index_sizeCamps, \
bidFound, xFound)
probBidLR=CreateProbOfBidding(mat_x_by_ImpLR)
return [budgetLR, cartBidsLR, cartWonLR, cartClickedLR, costBidsLR, \
revenueLR, profitLR, budgetGr, cartBidsGr, cartWonGr, \
cartClickedGr, costBidsGr, revenueGr, profitGr, mat_r_by_Imp, \
mat_ctrTest, mat_rctr_by_Imp, mat_rhoctr_by_Imp, mat_rctrBetarho_by_Imp, \
mat_bid_by_ImpLR, mat_x_by_ImpLR, probBidLR]
# Comments About the Implementation
# - We win an auction only if the bid_amount is higher than the market price that appears in the Ipinyou Log.
# In case of winning the auction we then need to check if a click occurs. We update the revenue, profit,
# budget, cartWon, costBids, and cartClicked accordingly.
# - For the indicator and indicator+$\ell_2$ cases we only need to check the allocation vector to decide
# the campaign to bid on behalf of (the allocation vector comes from running our primal-dual procedure).
# Simulation code for Indicator, Indicator + $\ell_2$, and Greedy
def RunIndL2IndAndGreedy(numCampaigns, num_impressions, num_edges, index_Imps, \
index_sizeCamps, PPFTable, numericBeta, vector_q, vector_m, vector_ctrTrain, vector_rctrTrain, \
vector_ctrTest, vector_rctrTest, bidsInd, xInd, bidsL2Ind, xL2Ind, tau, ImpInOrder, MPInOrder,\
impNames, listCampPerImp):
## We first initialize the budgets used, matrices of bids made, won, and clicked for
## three methods.
[budgetInd, cartBidsInd, cartWonInd, cartClickedInd, costBidsInd, \
revenueInd, profitInd, _, _, _, _, _, _, _, _, _, mat_rctr_by_ImpTrain, _, _, \
mat_bid_by_ImpInd, mat_x_by_ImpInd, probBidInd]=CreateDataForSimulation(bidsInd, \
xInd, numCampaigns, num_impressions, num_edges, index_Imps, index_sizeCamps, \
vector_q, vector_ctrTrain, vector_rctrTrain, vector_m, PPFTable, numericBeta)
[budgetL2Ind, cartBidsL2Ind, cartWonL2Ind, cartClickedL2Ind, costBidsL2Ind, \
revenueL2Ind, profitL2Ind, budgetGr, cartBidsGr, cartWonGr, \
cartClickedGr, costBidsGr, revenueGr, profitGr, mat_r_by_Imp, \
mat_ctrTest, _, _, _, \
mat_bid_by_ImpL2Ind, mat_x_by_ImpL2Ind, probBidL2Ind]=CreateDataForSimulation(bidsL2Ind, \
xL2Ind, numCampaigns, num_impressions, num_edges, index_Imps, index_sizeCamps, \
vector_q, vector_ctrTest, vector_rctrTest, vector_m, PPFTable, numericBeta)
## Now we simulate
# campaignsArange=np.arange(numCampaigns)
## Instead of calling np.random.uniform every time we need a uniform sample,
## we call the method once at the beginning of the simulation and save all
## the uniform samples we need.
allUnifToUse = np.random.uniform(0.0, 1.0, (len(ImpInOrder)*3))
# uniOnline=False
## We read the test log in the order in which the impression types appear.
for i,clusterId in enumerate(ImpInOrder):
impType=impNames.index(clusterId)
unifs=allUnifToUse[(3*i):(3*(i+1))]
## Market Price that appears in the test log.
mp_value=MPInOrder[i]
## Update Ind
indBuyerInd=0
tryToBidInd=False
bidAmountInd=0.0
## First we check if the method would try to bid for the impression
## or would just discard it immediately
if unifs[0] <= probBidInd[impType]:
## For each campaign we check if there is any that has enough budget to bid and that
## also wants to do so.
bidUsingInd=False
# print('budgetInd[listCampPerImp[impType]]: '+str(budgetInd[listCampPerImp[impType]]))
# aux53 =(mat_r_by_Imp[listCampPerImp[impType],impType] <= budgetInd[listCampPerImp[impType]])
indInterested = (mat_x_by_ImpInd[listCampPerImp[impType],impType]>0) *\
(mat_r_by_Imp[listCampPerImp[impType],impType] <= budgetInd[listCampPerImp[impType]])
if np.sum(indInterested) >0:
bidUsingInd= True
if bidUsingInd:
## There is at least one campaign that wants to bid.
posInt=listCampPerImp[impType][indInterested]
## Conditional probability assuming that the method is going to bid.
## This conditional probability excludes all those campaigns
## that do not want to bid
condProbInterested=mat_x_by_ImpInd[posInt, impType]
condProbInterested*=1.0/np.sum(condProbInterested)
auxPartSum=0.0
## Now we choose on behalf of which campaign to bid.
numInterest = len(condProbInterested)
auxPosForindBuyerInd = numInterest-1
z = 0
while z<numInterest:
auxPartSum += condProbInterested[z]
if auxPartSum >= unifs[1]:
## If we exceed unifs[1] go out of the loop
auxPosForindBuyerInd=z
z+=numInterest
z += 1
indBuyerInd=posInt[auxPosForindBuyerInd]
tryToBidInd=True
bidAmountInd=mat_bid_by_ImpInd[indBuyerInd, impType]
## If tryToBidInd == True, we will try to bid on behalf of campaign indBuyerInd,
## bidding an amount of bidAmountInd.
if(tryToBidInd):
## We first register that we are bidding on behalf of indBuyerInd for an
## impression of type impType
cartBidsInd[indBuyerInd, impType]+=1
## We win the auction only if the value we are bidding is higher
## than the market price observed by Ipinyou
if bidAmountInd>= mp_value:
## Impression Won. Register that we won the impression and the change
## in cost and profit.
cartWonInd[indBuyerInd, impType]+=1
costBidsInd[impType]-=mp_value
profitInd[impType]-=mp_value
# Now we need to check if the ad was clicked.
probOfClick=mat_ctrTest[indBuyerInd, impType]
if (unifs[2]<=probOfClick):
## User clicked, increase revenue and charge the campaign (i.e. DSP wins money).
cartClickedInd[indBuyerInd, impType]+=1
payment=mat_r_by_Imp[indBuyerInd, impType]
revenueInd[impType]+=payment
profitInd[impType]+=payment
budgetInd[indBuyerInd]-=payment
## Update L2Ind (Same code as done before for the pure indicator case)
indBuyerL2Ind=0
tryToBidL2Ind=False
bidAmountL2Ind=0.0
if unifs[0] <= probBidL2Ind[impType]:
## For each campaign we check if there is any that has enough budget to bid and that
## also wants to do so.
bidUsingL2Ind=False
indInterested =\
(mat_r_by_Imp[listCampPerImp[impType],impType] <= budgetL2Ind[listCampPerImp[impType]]) * \
(mat_x_by_ImpL2Ind[listCampPerImp[impType],impType]>0)
if np.sum(indInterested) >0:
bidUsingL2Ind= True
if bidUsingL2Ind:
## There is at least one campaign that wants to bid.
posInt=listCampPerImp[impType][indInterested]
## Conditional probability assuming that the method is going to bid.
## This conditional probability excludes all those campaigns
## that do not want to bid
condProbInterested=mat_x_by_ImpL2Ind[posInt, impType]
condProbInterested*=1.0/np.sum(condProbInterested)
auxPartSum=0.0
## Now we choose on behalf of which campaign to bid.
numInterest = len(condProbInterested)
auxPosForindBuyerL2Ind = numInterest-1
z = 0
while z <numInterest:
auxPartSum += condProbInterested[z]
if auxPartSum >= unifs[1]:
## If we exceed unifs[1] go out of the loop
auxPosForindBuyerL2Ind=z
z+=numInterest
z += 1
indBuyerL2Ind=posInt[auxPosForindBuyerL2Ind]
tryToBidL2Ind=True
bidAmountL2Ind=mat_bid_by_ImpL2Ind[indBuyerL2Ind, impType]
if(tryToBidL2Ind):
cartBidsL2Ind[indBuyerL2Ind, impType]+=1
if bidAmountL2Ind>= mp_value:
## Impression Won.
cartWonL2Ind[indBuyerL2Ind, impType]+=1
costBidsL2Ind[impType]-=mp_value
profitL2Ind[impType]-=mp_value
# Now we need to check if the ad was clicked.
probOfClick=mat_ctrTest[indBuyerL2Ind, impType]
if (unifs[2]<=probOfClick):
## User clicked, increase revenue and charge the campaign.
cartClickedL2Ind[indBuyerL2Ind, impType]+=1
payment=mat_r_by_Imp[indBuyerL2Ind, impType]
revenueL2Ind[impType]+=payment
profitL2Ind[impType]+=payment
budgetL2Ind[indBuyerL2Ind]-=payment
### Now we update the Greedy Policy
## Among the campaigns that still have remaining budget, the greedy heuristic
## bids on behalf of the one with the highest r times ctr.
## This is appropriate because Ipinyou assumes second-price auctions.
indBuyerGr=-1
bidAmountGr=0.0
tryToBidGr=False
indInterested =\
mat_r_by_Imp[listCampPerImp[impType],impType] <= budgetGr[listCampPerImp[impType]]
if np.sum(indInterested) > 0:
posInt=listCampPerImp[impType][indInterested]
indBuyerGr = posInt[np.argmax(mat_rctr_by_ImpTrain[posInt,impType])]
bidAmountGr=mat_rctr_by_ImpTrain[indBuyerGr, impType]
tryToBidGr=True
## If tryToBidGr == True, we will bid on behalf of campaign 'indBuyerGr'
## the amount 'bidAmountGr'
if (tryToBidGr):
## Save that we are bidding on behalf of 'indBuyerGr' for an impression of
## type 'impType'
cartBidsGr[indBuyerGr, impType]+=1
## We win the auction only if the value we are bidding is higher
## than the market price observed by Ipinyou.
if bidAmountGr>= mp_value:
## Impression Won.
cartWonGr[indBuyerGr, impType]+=1
costBidsGr[impType]-=mp_value
profitGr[impType]-=mp_value
# Now we need to check if the ad was clicked.
probOfClick=mat_ctrTest[indBuyerGr, impType]
if (unifs[2]<=probOfClick):
## User clicked, increase revenue and charge the campaign.
cartClickedGr[indBuyerGr, impType]+=1
payment=mat_r_by_Imp[indBuyerGr, impType]
revenueGr[impType]+=payment
profitGr[impType]+=payment
budgetGr[indBuyerGr]-=payment
return [budgetInd, cartBidsInd, cartWonInd, cartClickedInd, costBidsInd, \
revenueInd, profitInd, budgetL2Ind, cartBidsL2Ind, cartWonL2Ind, \
cartClickedL2Ind, costBidsL2Ind, revenueL2Ind, profitL2Ind, budgetGr, \
cartBidsGr, cartWonGr, cartClickedGr, costBidsGr, revenueGr, profitGr]
def RunInd_L2_L2Ind_Greedy(numCampaigns, num_impressions, num_edges, index_Imps, \
index_sizeCamps, PPFTable, numericBeta, vector_q, vector_m, vector_ctrTrain, vector_rctrTrain, \
vector_ctrTest, vector_rctrTest, bidsInd, xInd, bidsL2, xL2, bidsL2Ind, xL2Ind, tau,\
ImpInOrder, MPInOrder, impNames, listCampPerImp):
## We first initialize the budgets used, matrices of bids made, won, and clicked for
## three methods.
[budgetInd, cartBidsInd, cartWonInd, cartClickedInd, costBidsInd, \
revenueInd, profitInd, _, _, _, _, _, _, _, _, _, mat_rctr_by_ImpTrain, _, _, \
mat_bid_by_ImpInd, mat_x_by_ImpInd, probBidInd]=CreateDataForSimulation(bidsInd, \
xInd, numCampaigns, num_impressions, num_edges, index_Imps, index_sizeCamps, \
vector_q, vector_ctrTrain, vector_rctrTrain, vector_m, PPFTable, numericBeta)
[budgetL2Ind, cartBidsL2Ind, cartWonL2Ind, cartClickedL2Ind, costBidsL2Ind, \
revenueL2Ind, profitL2Ind, budgetGr, cartBidsGr, cartWonGr, \
cartClickedGr, costBidsGr, revenueGr, profitGr, mat_r_by_Imp, \
mat_ctrTest, _, _, _, \
mat_bid_by_ImpL2Ind, mat_x_by_ImpL2Ind, probBidL2Ind]=CreateDataForSimulation(bidsL2Ind, \
xL2Ind, numCampaigns, num_impressions, num_edges, index_Imps, index_sizeCamps, \
vector_q, vector_ctrTest, vector_rctrTest, vector_m, PPFTable, numericBeta)
[budgetL2, cartBidsL2, cartWonL2, cartClickedL2, costBidsL2, \
revenueL2, profitL2, _, _, _, _, _, _, _, _, _, _, _, _, \
mat_bid_by_ImpL2, mat_x_by_ImpL2, probBidL2]=CreateDataForSimulation(bidsL2, \
xL2, numCampaigns, num_impressions, num_edges, index_Imps, index_sizeCamps, \
vector_q, vector_ctrTest, vector_rctrTest, vector_m, PPFTable, numericBeta)
## Now we simulate
# campaignsArange=np.arange(numCampaigns)
## Instead of calling np.random.uniform every time we need a uniform sample,
## we call the method once at the beginning of the simulation and save all
## the uniform samples we need.
allUnifToUse = np.random.uniform(0.0, 1.0, (len(ImpInOrder)*3))
# uniOnline=False
## We read the test log in the order in which the impression types appear.
for i,clusterId in enumerate(ImpInOrder):
impType=impNames.index(clusterId)
unifs=allUnifToUse[(3*i):(3*(i+1))]
## Market Price that appears in the test log.
mp_value=MPInOrder[i]
## Update Ind
indBuyerInd=0
tryToBidInd=False
bidAmountInd=0.0
## First we check if the method would try to bid for the impression
## or would just discard it immediately
if unifs[0] <= probBidInd[impType]:
## For each campaign we check if there is any that has enough budget to bid and that
## also wants to do so.
bidUsingInd=False
# print('budgetInd[listCampPerImp[impType]]: '+str(budgetInd[listCampPerImp[impType]]))
# aux53 =(mat_r_by_Imp[listCampPerImp[impType],impType] <= budgetInd[listCampPerImp[impType]])
indInterested = (mat_x_by_ImpInd[listCampPerImp[impType],impType]>0) *\
(mat_r_by_Imp[listCampPerImp[impType],impType] <= budgetInd[listCampPerImp[impType]])
if np.sum(indInterested) >0:
bidUsingInd= True
if bidUsingInd:
## There is at least one campaign that wants to bid.
posInt=listCampPerImp[impType][indInterested]
## Conditional probability assuming that the method is going to bid.
## This conditional probability excludes all those campaigns
## that do not want to bid
condProbInterested=mat_x_by_ImpInd[posInt, impType]
condProbInterested*=1.0/np.sum(condProbInterested)
auxPartSum=0.0
## Now we choose on behalf of which campaign to bid.
numInterest = len(condProbInterested)
auxPosForindBuyerInd = numInterest-1
z = 0
while z<numInterest:
auxPartSum += condProbInterested[z]
if auxPartSum >= unifs[1]:
## Once the cumulative sum reaches unifs[1], record the index and force loop exit.
auxPosForindBuyerInd=z
z+=numInterest
z += 1
indBuyerInd=posInt[auxPosForindBuyerInd]
tryToBidInd=True
bidAmountInd=mat_bid_by_ImpInd[indBuyerInd, impType]
## If tryToBidInd == True, we try to bid on behalf of campaign indBuyerInd,
## bidding an amount of bidAmountInd.
if(tryToBidInd):
## We first record that we are bidding on behalf of indBuyerInd for an
## impression of type impType.
cartBidsInd[indBuyerInd, impType]+=1
## We win the auction only if the value we are bidding is higher
## than the market price observed by Ipinyou
if bidAmountInd>= mp_value:
## Impression Won. Register that we won the impression and the change
## in cost and profit.
cartWonInd[indBuyerInd, impType]+=1
costBidsInd[impType]-=mp_value
profitInd[impType]-=mp_value
# Now we need to check if the ad was clicked.
probOfClick=mat_ctrTest[indBuyerInd, impType]
if (unifs[2]<=probOfClick):
## User clicked, increase revenue and charge the campaign (i.e. DSP wins money).
cartClickedInd[indBuyerInd, impType]+=1
payment=mat_r_by_Imp[indBuyerInd, impType]
revenueInd[impType]+=payment
profitInd[impType]+=payment
budgetInd[indBuyerInd]-=payment
## Update L2 (Same code as done before for the pure indicator case)
indBuyerL2=0
tryToBidL2=False
bidAmountL2=0.0
if unifs[0] <= probBidL2[impType]:
## For each campaign we check if there is any that has enough budget to bid and that
## also wants to do so.
bidUsingL2=False
indInterested =\
(mat_r_by_Imp[listCampPerImp[impType],impType] <= budgetL2[listCampPerImp[impType]]) * \
(mat_x_by_ImpL2[listCampPerImp[impType],impType]>0)
if np.sum(indInterested) >0:
bidUsingL2= True
if bidUsingL2:
## There is at least one campaign that wants to bid.
posInt=listCampPerImp[impType][indInterested]
## Conditional probability assuming that the method is going to bid.
## This conditional probability excludes all those campaigns
## that do not want to bid
condProbInterested=mat_x_by_ImpL2[posInt, impType]
condProbInterested*=1.0/np.sum(condProbInterested)
auxPartSum=0.0
## Now we choose on behalf of which campaign to bid.
numInterest = len(condProbInterested)
auxPosForindBuyerL2 = numInterest-1
z = 0
while z <numInterest:
auxPartSum += condProbInterested[z]
if auxPartSum >= unifs[1]:
## Once the cumulative sum reaches unifs[1], record the index and force loop exit.
auxPosForindBuyerL2=z
z+=numInterest
z += 1
indBuyerL2=posInt[auxPosForindBuyerL2]
tryToBidL2=True
bidAmountL2=mat_bid_by_ImpL2[indBuyerL2, impType]
if(tryToBidL2):
cartBidsL2[indBuyerL2, impType]+=1
if bidAmountL2>= mp_value:
## Impression Won.
cartWonL2[indBuyerL2, impType]+=1
costBidsL2[impType]-=mp_value
profitL2[impType]-=mp_value
# Now we need to check if the ad was clicked.
probOfClick=mat_ctrTest[indBuyerL2, impType]
if (unifs[2]<=probOfClick):
## User clicked, increase revenue and charge the campaign.
cartClickedL2[indBuyerL2, impType]+=1
payment=mat_r_by_Imp[indBuyerL2, impType]
revenueL2[impType]+=payment
profitL2[impType]+=payment
budgetL2[indBuyerL2]-=payment
## Update L2Ind (Same code as done before for the pure indicator case)
indBuyerL2Ind=0
tryToBidL2Ind=False
bidAmountL2Ind=0.0
if unifs[0] <= probBidL2Ind[impType]:
## For each campaign we check if there is any that has enough budget to bid and that
## also wants to do so.
bidUsingL2Ind=False
indInterested =\
(mat_r_by_Imp[listCampPerImp[impType],impType] <= budgetL2Ind[listCampPerImp[impType]]) * \
(mat_x_by_ImpL2Ind[listCampPerImp[impType],impType]>0)
if np.sum(indInterested) >0:
bidUsingL2Ind= True
if bidUsingL2Ind:
## There is at least one campaign that wants to bid.
posInt=listCampPerImp[impType][indInterested]
## Conditional probability assuming that the method is going to bid.
## This conditional probability excludes all those campaigns
## that do not want to bid
condProbInterested=mat_x_by_ImpL2Ind[posInt, impType]
condProbInterested*=1.0/np.sum(condProbInterested)
auxPartSum=0.0
## Now we choose on behalf of which campaign to bid.
numInterest = len(condProbInterested)
auxPosForindBuyerL2Ind = numInterest-1
z = 0
while z < numInterest:
auxPartSum += condProbInterested[z]
if auxPartSum >= unifs[1]:
## Once the cumulative sum reaches unifs[1], record the index and force loop exit.
auxPosForindBuyerL2Ind=z
z+=numInterest
z += 1
indBuyerL2Ind=posInt[auxPosForindBuyerL2Ind]
tryToBidL2Ind=True
bidAmountL2Ind = mat_bid_by_ImpL2Ind[indBuyerL2Ind, impType]
if(tryToBidL2Ind):
cartBidsL2Ind[indBuyerL2Ind, impType]+=1
if bidAmountL2Ind>= mp_value:
## Impression Won.
cartWonL2Ind[indBuyerL2Ind, impType]+=1
costBidsL2Ind[impType]-=mp_value
profitL2Ind[impType]-=mp_value
# Now we need to check if the ad was clicked.
probOfClick=mat_ctrTest[indBuyerL2Ind, impType]
if (unifs[2]<=probOfClick):
## User clicked, increase revenue and charge the campaign.
cartClickedL2Ind[indBuyerL2Ind, impType]+=1
payment=mat_r_by_Imp[indBuyerL2Ind, impType]
revenueL2Ind[impType]+=payment
profitL2Ind[impType]+=payment
budgetL2Ind[indBuyerL2Ind]-=payment
### Now we update the Greedy Policy
## The greedy heuristic considers the campaigns that still have remaining
## budget and, among those, bids on behalf of the one with the highest r times ctr.
## This is a natural strategy as Ipinyou assumes second-price auctions.
indBuyerGr=-1
bidAmountGr=0.0
tryToBidGr=False
indInterested =\
mat_r_by_Imp[listCampPerImp[impType],impType] <= budgetGr[listCampPerImp[impType]]
if np.sum(indInterested) > 0:
posInt=listCampPerImp[impType][indInterested]
indBuyerGr = posInt[np.argmax(mat_rctr_by_ImpTrain[posInt,impType])]
bidAmountGr = mat_rctr_by_ImpTrain[indBuyerGr, impType]
tryToBidGr = True
## If tryToBidGr == True, we will bid on behalf of campaign 'indBuyerGr'
## the amount 'bidAmountGr'
if (tryToBidGr):
## Record that we are bidding on behalf of 'indBuyerGr' for an impression of
## type 'impType'.
cartBidsGr[indBuyerGr, impType]+=1
## We win the auction only if the value we are bidding is higher
## than the market price observed by Ipinyou.
if bidAmountGr>= mp_value:
## Impression Won.
cartWonGr[indBuyerGr, impType]+=1
costBidsGr[impType]-=mp_value
profitGr[impType]-=mp_value
# Now we need to check if the ad was clicked.
probOfClick=mat_ctrTest[indBuyerGr, impType]
if (unifs[2]<=probOfClick):
## User clicked, increase revenue and charge the campaign.
cartClickedGr[indBuyerGr, impType]+=1
payment=mat_r_by_Imp[indBuyerGr, impType]
revenueGr[impType]+=payment
profitGr[impType]+=payment
budgetGr[indBuyerGr]-=payment
return [budgetInd, cartBidsInd, cartWonInd, cartClickedInd, costBidsInd, revenueInd, \
profitInd, budgetL2, cartBidsL2, cartWonL2, cartClickedL2, costBidsL2, revenueL2, \
profitL2, budgetL2Ind, cartBidsL2Ind, cartWonL2Ind, cartClickedL2Ind, costBidsL2Ind, \
revenueL2Ind, profitL2Ind, budgetGr, cartBidsGr, cartWonGr, cartClickedGr, costBidsGr,\
revenueGr, profitGr]
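## Illustrative note (added): under the second-price assumption stated above, bidding the
## expected value of the impression is the classical truthful strategy. With toy numbers:
# r, ctr = 2.0, 0.05   # hypothetical payoff-per-click and click-through rate
# bid = r * ctr        # 0.1 -- profitable to win whenever the market price is below this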
# ## Simulation for the profit maximization, profit maximization + L2, and Greedy policies
# Here we run the experiment changing the budget values to be [(1.0/32.0), (1.0/8.0), .25, 0.5, 1.0]
# times the budgets used by Ipinyou. The iteration over the budget percentages is done in 'for perc in perVector_m:'.
def ExperIndL2IndAndGreedy(numCampaigns, num_impressions, num_edges, index_Imps, \
index_sizeCamps, PPFTable, numericBeta, vector_q, vector_mOrigTest, \
vector_sTest, vector_ctrTrain, vector_ctrTest, ImpInOrder, MPInOrder, impNames, \
alphasInd, num_itInd, alphasL2Ind, num_itL2Ind, p_grad_TypeInd, p_grad_TypeL2Ind, \
tau, init_lam, listCampPerImp, perVector_m=[(1.0/32.0), (1.0/8.0), .25, 0.5, 1.0], sim=100):
print('Starting ExperIndL2IndAndGreedy')
## The gradient type is needed as the different utility functions have different forms
## for p'(\cdot), and we want to use the right subgradient depending on the method we are using.
global p_grad_Type
vector_rctrTrain=np.multiply(vector_q, vector_ctrTrain)
vector_rctrTest=np.multiply(vector_q, vector_ctrTest)
dictToRetInd={}
dictToRetL2Ind={}
dictToRetGr={}
for perc in perVector_m:
## We first run the primal dual-subgradient method using the pure indicator utility function first
## and then the indicator plus l2 penalization.
print("Percentage: "+str(perc))
vector_m = vector_mOrigTest*perc
vector_s = vector_sTest
ext_s = vector_s[index_Imps]
dictToRetInd[perc]=[]
dictToRetL2Ind[perc]=[]
dictToRetGr[perc]=[]
p_grad_Type=p_grad_TypeInd
print('About to Run the SubgrAlgSavPrimDualObjInd using '+str(num_itInd)+' iterations')
initTime =time.time()
[dual_FnValues, primal_GivenMu, budget_used, dual_vars, dual_AvgLamFnValues, \
primal_AvgLamGivenMu, budget_LamAvgUse, dual_varsAvg]=SubgrAlgSavPrimDualObjInd(\
init_lam, num_itInd, alphasInd, vector_q, vector_ctrTrain, vector_rctrTrain, vector_s, ext_s, \
vector_m, num_impressions, numCampaigns, num_edges, \
PPFTable, numericBeta, index_sizeCamps, index_Imps, (num_itInd-1), p_grad_Type)
print("Took: "+str( time.time()-initTime)+' seconds')
#print("Duality Gap Last Iteration")
#print(str(dual_AvgLamFnValues[len(dual_AvgLamFnValues)-1]-primal_AvgLamGivenMu[len(primal_AvgLamGivenMu)-1]))
lamFinal=dual_varsAvg[len(dual_varsAvg)-1]
ext_LamFinal=ExtendSizeCamps(lamFinal, index_sizeCamps)
bidsInd=OptimalBids(ext_LamFinal, vector_rctrTrain)
[rho_eval_Ind, beta_eval_Ind]=CalcRhoAndBetaVectors(bidsInd, num_edges, index_Imps, PPFTable, numericBeta)
xInd = CalculateLPGurobi(rho_eval_Ind, beta_eval_Ind, vector_rctrTrain, vector_m, \
ext_s, num_impressions, numCampaigns, num_edges, index_Imps, \
index_sizeCamps)
# xInd=OptimalX(beta_eval, rho_eval, ext_LamFinal, ext_s, vector_rctrTrain, num_edges, numCampaigns, \
# num_impressions, index_Imps, index_sizeCamps)
print('')
print('')
print('About to Run the SubgrAlgSavPrimDualObjFn_L2Ind using '+str(num_itL2Ind)+' iterations')
initTime =time.time()
p_grad_Type=p_grad_TypeL2Ind
tau=np.power(vector_m, -1)
[dual_FnValues, primal_GivenMu, budget_used, dual_vars, dual_AvgLamFnValues, \
primal_AvgLamGivenMu, budget_LamAvgUse, dual_varsAvg]=SubgrAlgSavPrimDualObjFn_L2Ind(\
init_lam, num_itL2Ind, alphasL2Ind, vector_q, vector_ctrTrain, vector_rctrTrain, vector_s, ext_s, \
vector_m, num_impressions, numCampaigns, num_edges, PPFTable, numericBeta, index_sizeCamps, \
index_Imps, (num_itL2Ind-1), p_grad_Type, tau, True)
print("Took: "+str( time.time()-initTime)+' seconds')
#print("Duality Gap Last Iteration")
#print(str(dual_AvgLamFnValues[len(dual_AvgLamFnValues)-1]-primal_AvgLamGivenMu[len(primal_AvgLamGivenMu)-1]))
lamFinal=dual_varsAvg[len(dual_varsAvg)-1]
ext_LamFinal=ExtendSizeCamps(lamFinal, index_sizeCamps)
bidsL2Ind=OptimalBids(ext_LamFinal, vector_rctrTrain)
[rho_eval_L2Ind, beta_eval_L2Ind]=CalcRhoAndBetaVectors(bidsL2Ind, num_edges, index_Imps, PPFTable, numericBeta)
xL2Ind = CalculateQuadGurobi(rho_eval_L2Ind, beta_eval_L2Ind, vector_rctrTrain, vector_m, ext_s, \
num_impressions, numCampaigns, num_edges, index_Imps, index_sizeCamps, tau)
# xL2Ind=OptimalX(beta_eval, rho_eval, ext_LamFinal, ext_s, vector_rctrTrain, num_edges, numCampaigns, \
# num_impressions, index_Imps, index_sizeCamps)
## Now that we have run the primal-dual subgradient methods we run simulations of
## how they would perform in the test log, as explained in the paper. The number of simulations to
## run is equal to the parameter sim.
print('')
print('')
print('Finished running the Primal-Dual Algorithms')
print('Starting RunIndL2IndAndGreedy using '+str(perc)+' percentage of the Test budgets')
initTime =time.time()
for i in range(sim):
[budgetInd, cartBidsInd, cartWonInd, cartClickedInd, costBidsInd, \
revenueInd, profitInd, budgetL2Ind, cartBidsL2Ind, cartWonL2Ind, \
cartClickedL2Ind, costBidsL2Ind, revenueL2Ind, profitL2Ind, budgetGr, \
cartBidsGr, cartWonGr, cartClickedGr, costBidsGr, revenueGr, profitGr]=\
RunIndL2IndAndGreedy(numCampaigns, num_impressions, num_edges, index_Imps, \
index_sizeCamps, PPFTable, numericBeta, vector_q, vector_m, vector_ctrTrain, \
vector_rctrTrain, vector_ctrTest, vector_rctrTest, bidsInd, xInd, bidsL2Ind, \
xL2Ind, tau, ImpInOrder, MPInOrder, impNames,listCampPerImp)
dictToRetInd[perc].append([budgetInd, cartBidsInd, cartWonInd, \
cartClickedInd, costBidsInd, revenueInd, profitInd])
dictToRetL2Ind[perc].append([budgetL2Ind, cartBidsL2Ind, cartWonL2Ind, \
cartClickedL2Ind, costBidsL2Ind, revenueL2Ind, profitL2Ind])
dictToRetGr[perc].append([budgetGr, cartBidsGr, cartWonGr, cartClickedGr, \
costBidsGr, revenueGr, profitGr])
# print("Profit Ind: "+str(np.sum(profitInd)))
# print("Profit Gr: "+str(np.sum(profitGr)))
# print("Ratio of Profits: "+str(np.sum(profitInd)/np.sum(profitGr)))
print("Took: "+str(time.time()-initTime)+' seconds')
return [dictToRetInd, dictToRetL2Ind, dictToRetGr]
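## Hypothetical post-processing sketch (added; mirrors the list layout appended above,
## where each simulation stores [budget, bids, won, clicked, cost, revenue, profit]):
# def mean_total_profit(dictToRet, perc):
#     return np.mean([np.sum(run[6]) for run in dictToRet[perc]])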
def Exper_Ind_L2_L2Ind_Greedy(numCampaigns, num_impressions, num_edges, index_Imps, index_sizeCamps,\
PPFTable, numericBeta, vector_q, vector_mOrigTest, vector_sTest, vector_ctrTrain, vector_ctrTest, \
ImpInOrder, MPInOrder, impNames, alphasInd, num_itInd, alphasL2, num_itL2, alphasL2Ind, num_itL2Ind,\
p_grad_TypeInd, p_grad_TypeL2, p_grad_TypeL2Ind, tau, init_lam, listCampPerImp,\
perVector_m=[(1.0/32.0), (1.0/8.0), .25, 0.5, 1.0], sim=100):
print('Starting Exper_Ind_L2_L2Ind_Greedy')
## The gradient type is needed as the different utility functions have different forms
## for p'(\cdot), and we want to use the right subgradient depending on the method we are using.
global p_grad_Type
vector_rctrTrain=np.multiply(vector_q, vector_ctrTrain)
vector_rctrTest=np.multiply(vector_q, vector_ctrTest)
dictToRetInd={}
dictToRetL2={}
dictToRetL2Ind={}
dictToRetGr={}
for perc in perVector_m:
## We first run the primal dual-subgradient method using the pure indicator utility function first
## and then the indicator plus l2 penalization.
print("Percentage: "+str(perc))
vector_m = vector_mOrigTest*perc
vector_s = vector_sTest
ext_s = vector_s[index_Imps]
dictToRetInd[perc] = []
dictToRetL2[perc] = []
dictToRetL2Ind[perc] = []
dictToRetGr[perc] = []
p_grad_Type=p_grad_TypeInd
print('About to Run the SubgrAlgSavPrimDualObjInd using '+str(num_itInd)+' iterations')
initTime =time.time()
[dual_FnValues, primal_GivenMu, budget_used, dual_vars, dual_AvgLamFnValues, \
primal_AvgLamGivenMu, budget_LamAvgUse, dual_varsAvg]=SubgrAlgSavPrimDualObjInd(\
init_lam, num_itInd, alphasInd, vector_q, vector_ctrTrain, vector_rctrTrain, vector_s, ext_s, \
vector_m, num_impressions, numCampaigns, num_edges, \
PPFTable, numericBeta, index_sizeCamps, index_Imps, (num_itInd-1), p_grad_Type)
print("Took: "+str( time.time()-initTime)+' seconds')
#print("Duality Gap Last Iteration")
#print(str(dual_AvgLamFnValues[len(dual_AvgLamFnValues)-1]-primal_AvgLamGivenMu[len(primal_AvgLamGivenMu)-1]))
lamFinal=dual_varsAvg[len(dual_varsAvg)-1]
ext_LamFinal=ExtendSizeCamps(lamFinal, index_sizeCamps)
bidsInd=OptimalBids(ext_LamFinal, vector_rctrTrain)
[rho_eval_Ind, beta_eval_Ind]=CalcRhoAndBetaVectors(bidsInd, num_edges, index_Imps, PPFTable, numericBeta)
xInd = CalculateLPGurobi(rho_eval_Ind, beta_eval_Ind, vector_rctrTrain, vector_m, \
ext_s, num_impressions, numCampaigns, num_edges, index_Imps, \
index_sizeCamps)
# xInd=OptimalX(beta_eval, rho_eval, ext_LamFinal, ext_s, vector_rctrTrain, num_edges, numCampaigns, \
# num_impressions, index_Imps, index_sizeCamps)
print('')
print('')
print('About to Run the SubgrAlgSavPrimDualObjFn_L2Ind using '+str(num_itL2Ind)+' iterations without Indicator')
initTime =time.time()
p_grad_Type=p_grad_TypeL2
tau=np.power(vector_m, -1)
[dual_FnValues, primal_GivenMu, budget_used, dual_vars, dual_AvgLamFnValues, \
primal_AvgLamGivenMu, budget_LamAvgUse, dual_varsAvg]=SubgrAlgSavPrimDualObjFn_L2Ind(\
init_lam, num_itL2Ind, alphasL2Ind, vector_q, vector_ctrTrain, vector_rctrTrain, vector_s, ext_s, \
vector_m, num_impressions, numCampaigns, num_edges, PPFTable, numericBeta, index_sizeCamps, \
index_Imps, (num_itL2Ind-1), p_grad_Type, tau, False)
print("Took: "+str( time.time()-initTime)+' seconds')
#print("Duality Gap Last Iteration")
#print(str(dual_AvgLamFnValues[len(dual_AvgLamFnValues)-1]-primal_AvgLamGivenMu[len(primal_AvgLamGivenMu)-1]))
lamFinal=dual_varsAvg[len(dual_varsAvg)-1]
ext_LamFinal=ExtendSizeCamps(lamFinal, index_sizeCamps)
bidsL2=OptimalBids(ext_LamFinal, vector_rctrTrain)
[rho_eval_L2, beta_eval_L2]=CalcRhoAndBetaVectors(bidsL2, num_edges, index_Imps, PPFTable, numericBeta)
xL2 = CalculateQuadGurobi(rho_eval_L2, beta_eval_L2, vector_rctrTrain, vector_m, ext_s, \
num_impressions, numCampaigns, num_edges, index_Imps, index_sizeCamps, tau, addIndicator = False)
print('')
print('')
print('About to Run the SubgrAlgSavPrimDualObjFn_L2Ind using '+str(num_itL2Ind)+' iterations')
initTime =time.time()
p_grad_Type=p_grad_TypeL2Ind
tau=np.power(vector_m, -1)
[dual_FnValues, primal_GivenMu, budget_used, dual_vars, dual_AvgLamFnValues, \
primal_AvgLamGivenMu, budget_LamAvgUse, dual_varsAvg]=SubgrAlgSavPrimDualObjFn_L2Ind(\
init_lam, num_itL2Ind, alphasL2Ind, vector_q, vector_ctrTrain, vector_rctrTrain, vector_s, ext_s, \
vector_m, num_impressions, numCampaigns, num_edges, PPFTable, numericBeta, index_sizeCamps, \
index_Imps, (num_itL2Ind-1), p_grad_Type, tau, True)
print("Took: "+str( time.time()-initTime)+' seconds')
#print("Duality Gap Last Iteration")
#print(str(dual_AvgLamFnValues[len(dual_AvgLamFnValues)-1]-primal_AvgLamGivenMu[len(primal_AvgLamGivenMu)-1]))
lamFinal=dual_varsAvg[len(dual_varsAvg)-1]
ext_LamFinal=ExtendSizeCamps(lamFinal, index_sizeCamps)
bidsL2Ind=OptimalBids(ext_LamFinal, vector_rctrTrain)
[rho_eval_L2Ind, beta_eval_L2Ind]=CalcRhoAndBetaVectors(bidsL2Ind, num_edges, index_Imps, PPFTable, numericBeta)
xL2Ind = CalculateQuadGurobi(rho_eval_L2Ind, beta_eval_L2Ind, vector_rctrTrain, vector_m, ext_s, \
num_impressions, numCampaigns, num_edges, index_Imps, index_sizeCamps, tau, addIndicator = True)
# xL2Ind=OptimalX(beta_eval, rho_eval, ext_LamFinal, ext_s, vector_rctrTrain, num_edges, numCampaigns, \
# num_impressions, index_Imps, index_sizeCamps)
## Now that we have run the primal-dual subgradient methods we run simulations of
## how they would perform in the test log, as explained in the paper. The number of simulations to
## run is equal to the parameter sim.
print('')
print('')
print('Finished running the Primal-Dual Algorithms')
print('Starting RunInd_L2_L2Ind_Greedy using '+str(perc)+' percentage of the Test budgets')
initTime =time.time()
for i in range(sim):
[budgetInd, cartBidsInd, cartWonInd, cartClickedInd, costBidsInd, revenueInd, \
profitInd, budgetL2, cartBidsL2, cartWonL2, cartClickedL2, costBidsL2, revenueL2, \
profitL2, budgetL2Ind, cartBidsL2Ind, cartWonL2Ind, cartClickedL2Ind, costBidsL2Ind, \
revenueL2Ind, profitL2Ind, budgetGr, cartBidsGr, cartWonGr, cartClickedGr, costBidsGr,\
revenueGr, profitGr] = RunInd_L2_L2Ind_Greedy(numCampaigns, num_impressions, num_edges, index_Imps, \
index_sizeCamps, PPFTable, numericBeta, vector_q, vector_m, vector_ctrTrain, \
vector_rctrTrain, vector_ctrTest, vector_rctrTest, bidsInd, xInd, bidsL2, xL2, bidsL2Ind, \
xL2Ind, tau, ImpInOrder, MPInOrder, impNames,listCampPerImp)
dictToRetInd[perc].append([budgetInd, cartBidsInd, cartWonInd, \
cartClickedInd, costBidsInd, revenueInd, profitInd])
dictToRetL2[perc].append([budgetL2, cartBidsL2, cartWonL2, \
cartClickedL2, costBidsL2, revenueL2, profitL2])
dictToRetL2Ind[perc].append([budgetL2Ind, cartBidsL2Ind, cartWonL2Ind, \
cartClickedL2Ind, costBidsL2Ind, revenueL2Ind, profitL2Ind])
dictToRetGr[perc].append([budgetGr, cartBidsGr, cartWonGr, cartClickedGr, \
costBidsGr, revenueGr, profitGr])
print("Took: "+str(time.time()-initTime)+' seconds')
return [dictToRetInd, dictToRetL2, dictToRetL2Ind, dictToRetGr]
def ExperIndL2IndAndGreedyOnePerc(numCampaigns, num_impressions, num_edges, index_Imps, \
index_sizeCamps, PPFTable, numericBeta, vector_q, vector_mOrigTest, \
vector_sTest, vector_ctrTrain, vector_ctrTest, ImpInOrder, MPInOrder, impNames, \
alphasInd, num_itInd, alphasL2Ind, num_itL2Ind, p_grad_TypeInd, p_grad_TypeL2Ind, \
init_lam, listCampPerImp, perc, sim, seeds):
print('Starting ExperIndL2IndAndGreedyOnePerc')
## The gradient type is needed as the different utility functions have different forms
## for p'(\cdot), and we want to use the right subgradient depending on the method we are using.
np.random.seed(12345)
global p_grad_Type
vector_rctrTrain=np.multiply(vector_q, vector_ctrTrain)
vector_rctrTest=np.multiply(vector_q, vector_ctrTest)
dictToRetInd = {}
dictToRetL2Ind = {}
dictToRetGr = {}
## We first run the primal dual-subgradient method using the pure indicator utility function first
## and then the indicator plus l2 penalization.
print('')
print("Percentage: "+str(perc)+ ', process id: '+str(os.getpid()))
vector_m = vector_mOrigTest[:]*perc
vector_s = vector_sTest[:]
ext_s = vector_s[index_Imps]
dictToRetInd[perc]=[]
dictToRetL2Ind[perc]=[]
dictToRetGr[perc]=[]
p_grad_Type=p_grad_TypeInd
print('About to Run the SubgrAlgSavPrimDualObjInd using '+str(num_itInd)+' iterations, '+'process id: '+str(os.getpid()))
initTime =time.time()
[_, _, _, dual_vars, _, _, _, dual_varsAvg] = SubgrAlgSavPrimDualObjInd(\
init_lam, num_itInd, alphasInd, vector_q, vector_ctrTrain, vector_rctrTrain, vector_s, ext_s, \
vector_m, num_impressions, numCampaigns, num_edges, \
PPFTable, numericBeta, index_sizeCamps, index_Imps, (num_itInd-1), p_grad_Type)
print("Took: "+str( time.time()-initTime)+' seconds, '+'process id: '+str(os.getpid()))
#print("Duality Gap Last Iteration")
#print(str(dual_AvgLamFnValues[len(dual_AvgLamFnValues)-1]-primal_AvgLamGivenMu[len(primal_AvgLamGivenMu)-1]))
lamFinal=dual_varsAvg[len(dual_varsAvg)-1]
ext_LamFinal=ExtendSizeCamps(lamFinal, index_sizeCamps)
bidsInd=OptimalBids(ext_LamFinal, vector_rctrTrain)
[rho_eval_Ind, beta_eval_Ind]=CalcRhoAndBetaVectors(bidsInd, num_edges, index_Imps, PPFTable, numericBeta)
xInd = CalculateLPGurobi(rho_eval_Ind, beta_eval_Ind, vector_rctrTrain, vector_m, \
ext_s, num_impressions, numCampaigns, num_edges, index_Imps, \
index_sizeCamps)
# xInd=OptimalX(beta_eval, rho_eval, ext_LamFinal, ext_s, vector_rctrTrain, num_edges, numCampaigns, \
# num_impressions, index_Imps, index_sizeCamps)
print('')
print('About to Run the SubgrAlgSavPrimDualObjFn_L2Ind using '+str(num_itL2Ind)+' iterations, '+'process id: '+str(os.getpid()))
initTime =time.time()
p_grad_Type=p_grad_TypeL2Ind
tau = np.power(vector_m, -1)
[_, _, _, dual_vars, _, _, _, dual_varsAvg] = SubgrAlgSavPrimDualObjFn_L2Ind(\
init_lam, num_itL2Ind, alphasL2Ind, vector_q, vector_ctrTrain, vector_rctrTrain, vector_s, ext_s, \
vector_m, num_impressions, numCampaigns, num_edges, PPFTable, numericBeta, index_sizeCamps, \
index_Imps, (num_itL2Ind-1), p_grad_Type, tau, True)
print("Took: "+str( time.time()-initTime)+' seconds, '+'process id: '+str(os.getpid()))
#print("Duality Gap Last Iteration")
#print(str(dual_AvgLamFnValues[len(dual_AvgLamFnValues)-1]-primal_AvgLamGivenMu[len(primal_AvgLamGivenMu)-1]))
lamFinal=dual_varsAvg[len(dual_varsAvg) - 1]
ext_LamFinal=ExtendSizeCamps(lamFinal, index_sizeCamps)
bidsL2Ind=OptimalBids(ext_LamFinal, vector_rctrTrain)
[rho_eval_L2Ind, beta_eval_L2Ind] = CalcRhoAndBetaVectors(bidsL2Ind, num_edges, index_Imps, PPFTable, numericBeta)
xL2Ind = CalculateQuadGurobi(rho_eval_L2Ind, beta_eval_L2Ind, vector_rctrTrain, vector_m, ext_s, \
num_impressions, numCampaigns, num_edges, index_Imps, index_sizeCamps, tau)
## Now that we have run the primal-dual subgradient methods we run simulations of
## how they would perform in the test log, as explained in the paper. The number of simulations to
## run is equal to the parameter sim.
print('Finished running the Primal-Dual Algorithms, '+'process id: '+str(os.getpid()))
print('About to Run the RunIndL2IndAndGreedy using '+str(perc)+' percentage of the Test budgets, '+'process id: '+str(os.getpid()))
initTime = time.time()
for i in range(sim):
np.random.seed(seeds[i])
[budgetInd, cartBidsInd, cartWonInd, cartClickedInd, costBidsInd, \
revenueInd, profitInd, budgetL2Ind, cartBidsL2Ind, cartWonL2Ind, \
cartClickedL2Ind, costBidsL2Ind, revenueL2Ind, profitL2Ind, budgetGr, \
cartBidsGr, cartWonGr, cartClickedGr, costBidsGr, revenueGr, profitGr]=\
RunIndL2IndAndGreedy(numCampaigns, num_impressions, num_edges, index_Imps, \
index_sizeCamps, PPFTable, numericBeta, vector_q, vector_m, vector_ctrTrain, \
vector_rctrTrain, vector_ctrTest, vector_rctrTest, bidsInd, xInd, bidsL2Ind, \
xL2Ind, tau, ImpInOrder, MPInOrder, impNames,listCampPerImp)
dictToRetInd[perc].append([budgetInd, cartBidsInd, cartWonInd, \
cartClickedInd, costBidsInd, revenueInd, profitInd])
dictToRetL2Ind[perc].append([budgetL2Ind, cartBidsL2Ind, cartWonL2Ind, \
cartClickedL2Ind, costBidsL2Ind, revenueL2Ind, profitL2Ind])
dictToRetGr[perc].append([budgetGr, cartBidsGr, cartWonGr, cartClickedGr, \
costBidsGr, revenueGr, profitGr])
# print("Profit Ind: "+str(np.sum(profitInd)))
# print("Profit Gr: "+str(np.sum(profitGr)))
# print("Ratio of Profits: "+str(np.sum(profitInd)/np.sum(profitGr)))
print("Took: "+str(time.time()-initTime)+' seconds')
return [dictToRetInd, dictToRetL2Ind, dictToRetGr]
def Exper_Ind_L2_L2Ind_GreedyOnePerc(numCampaigns, num_impressions, num_edges, index_Imps, index_sizeCamps,\
PPFTable, numericBeta, vector_q, vector_mOrigTest, vector_sTest, vector_ctrTrain, vector_ctrTest, \
ImpInOrder, MPInOrder, impNames, alphasInd, num_itInd, alphasL2, num_itL2, alphasL2Ind, num_itL2Ind,\
p_grad_TypeInd, p_grad_TypeL2, p_grad_TypeL2Ind, init_lam, listCampPerImp,\
perc, sim, seeds, tauMult = -1):
print('Starting Exper_Ind_L2_L2Ind_GreedyOnePerc')
## The gradient type is needed as the different utility functions have different forms
## for p'(\cdot), and we want to use the right subgradient depending on the method we are using.
np.random.seed(12345)
global p_grad_Type
vector_rctrTrain = np.multiply(vector_q, vector_ctrTrain)
vector_rctrTest = np.multiply(vector_q, vector_ctrTest)
dictToRetInd = {}
dictToRetL2 = {}
dictToRetL2Ind = {}
dictToRetGr = {}
print('')
print("Percentage: "+str(perc)+ ', process id: '+str(os.getpid()))
vector_m = vector_mOrigTest[:]*perc
vector_s = vector_sTest[:]
ext_s = vector_s[index_Imps]
dictToRetInd[perc] = []
dictToRetL2[perc] = []
dictToRetL2Ind[perc] = []
dictToRetGr[perc] = []
p_grad_Type=p_grad_TypeInd
print('About to Run the SubgrAlgSavPrimDualObjInd using '+str(num_itInd)+' iterations')
initTime =time.time()
[dual_FnValues, primal_GivenMu, budget_used, dual_vars, dual_AvgLamFnValues, \
primal_AvgLamGivenMu, budget_LamAvgUse, dual_varsAvg]=SubgrAlgSavPrimDualObjInd(\
init_lam, num_itInd, alphasInd, vector_q, vector_ctrTrain, vector_rctrTrain, vector_s, ext_s, \
vector_m, num_impressions, numCampaigns, num_edges, \
PPFTable, numericBeta, index_sizeCamps, index_Imps, (num_itInd-1), p_grad_Type)
print("Took: "+str( time.time()-initTime)+' seconds')
#print("Duality Gap Last Iteration")
#print(str(dual_AvgLamFnValues[len(dual_AvgLamFnValues)-1]-primal_AvgLamGivenMu[len(primal_AvgLamGivenMu)-1]))
lamFinal=dual_varsAvg[len(dual_varsAvg)-1]
ext_LamFinal=ExtendSizeCamps(lamFinal, index_sizeCamps)
bidsInd=OptimalBids(ext_LamFinal, vector_rctrTrain)
[rho_eval_Ind, beta_eval_Ind]=CalcRhoAndBetaVectors(bidsInd, num_edges, index_Imps, PPFTable, numericBeta)
xInd = CalculateLPGurobi(rho_eval_Ind, beta_eval_Ind, vector_rctrTrain, vector_m, \
ext_s, num_impressions, numCampaigns, num_edges, index_Imps, \
index_sizeCamps)
# xInd=OptimalX(beta_eval, rho_eval, ext_LamFinal, ext_s, vector_rctrTrain, num_edges, numCampaigns, \
# num_impressions, index_Imps, index_sizeCamps)
print('')
print('')
print('About to Run the SubgrAlgSavPrimDualObjFn_L2Ind using '+str(num_itL2Ind)+' iterations without Indicator')
initTime =time.time()
p_grad_Type=p_grad_TypeL2
tau=np.power(vector_m, -1) * tauMult
[dual_FnValues, primal_GivenMu, budget_used, dual_vars, dual_AvgLamFnValues, \
primal_AvgLamGivenMu, budget_LamAvgUse, dual_varsAvg]=SubgrAlgSavPrimDualObjFn_L2Ind(\
init_lam, num_itL2Ind, alphasL2Ind, vector_q, vector_ctrTrain, vector_rctrTrain, vector_s, ext_s, \
vector_m, num_impressions, numCampaigns, num_edges, PPFTable, numericBeta, index_sizeCamps, \
index_Imps, (num_itL2Ind-1), p_grad_Type, tau, False)
print("Took: "+str( time.time()-initTime)+' seconds')
#print("Duality Gap Last Iteration")
#print(str(dual_AvgLamFnValues[len(dual_AvgLamFnValues)-1]-primal_AvgLamGivenMu[len(primal_AvgLamGivenMu)-1]))
lamFinal=dual_varsAvg[len(dual_varsAvg)-1]
ext_LamFinal=ExtendSizeCamps(lamFinal, index_sizeCamps)
bidsL2=OptimalBids(ext_LamFinal, vector_rctrTrain)
[rho_eval_L2, beta_eval_L2]=CalcRhoAndBetaVectors(bidsL2, num_edges, index_Imps, PPFTable, numericBeta)
xL2 = CalculateQuadGurobi(rho_eval_L2, beta_eval_L2, vector_rctrTrain, vector_m, ext_s, \
num_impressions, numCampaigns, num_edges, index_Imps, index_sizeCamps, tau, addIndicator = False)
print('')
print('')
print('About to Run the SubgrAlgSavPrimDualObjFn_L2Ind using '+str(num_itL2Ind)+' iterations')
initTime =time.time()
p_grad_Type=p_grad_TypeL2Ind
[dual_FnValues, primal_GivenMu, budget_used, dual_vars, dual_AvgLamFnValues, \
primal_AvgLamGivenMu, budget_LamAvgUse, dual_varsAvg]=SubgrAlgSavPrimDualObjFn_L2Ind(\
init_lam, num_itL2Ind, alphasL2Ind, vector_q, vector_ctrTrain, vector_rctrTrain, vector_s, ext_s, \
vector_m, num_impressions, numCampaigns, num_edges, PPFTable, numericBeta, index_sizeCamps, \
index_Imps, (num_itL2Ind-1), p_grad_Type, tau, True)
print("Took: "+str( time.time()-initTime)+' seconds')
#print("Duality Gap Last Iteration")
#print(str(dual_AvgLamFnValues[len(dual_AvgLamFnValues)-1]-primal_AvgLamGivenMu[len(primal_AvgLamGivenMu)-1]))
lamFinal=dual_varsAvg[len(dual_varsAvg)-1]
ext_LamFinal=ExtendSizeCamps(lamFinal, index_sizeCamps)
bidsL2Ind=OptimalBids(ext_LamFinal, vector_rctrTrain)
[rho_eval_L2Ind, beta_eval_L2Ind]=CalcRhoAndBetaVectors(bidsL2Ind, num_edges, index_Imps, PPFTable, numericBeta)
xL2Ind = CalculateQuadGurobi(rho_eval_L2Ind, beta_eval_L2Ind, vector_rctrTrain, vector_m, ext_s, \
num_impressions, numCampaigns, num_edges, index_Imps, index_sizeCamps, tau, addIndicator = True)
# xL2Ind=OptimalX(beta_eval, rho_eval, ext_LamFinal, ext_s, vector_rctrTrain, num_edges, numCampaigns, \
# num_impressions, index_Imps, index_sizeCamps)
## Now that we have run the primal-dual subgradient methods we run simulations of
## how they would perform in the test log, as explained in the paper. The number of simulations to
## run is equal to the parameter sim.
print('')
print('')
print('Finished running the Primal-Dual Algorithms')
print('Starting RunInd_L2_L2Ind_Greedy using '+str(perc)+' percentage of the Test budgets')
initTime =time.time()
for i in range(sim):
np.random.seed(seeds[i])
[budgetInd, cartBidsInd, cartWonInd, cartClickedInd, costBidsInd, revenueInd, \
profitInd, budgetL2, cartBidsL2, cartWonL2, cartClickedL2, costBidsL2, revenueL2, \
profitL2, budgetL2Ind, cartBidsL2Ind, cartWonL2Ind, cartClickedL2Ind, costBidsL2Ind, \
revenueL2Ind, profitL2Ind, budgetGr, cartBidsGr, cartWonGr, cartClickedGr, costBidsGr,\
revenueGr, profitGr] = RunInd_L2_L2Ind_Greedy(numCampaigns, num_impressions, num_edges, index_Imps, \
index_sizeCamps, PPFTable, numericBeta, vector_q, vector_m, vector_ctrTrain, \
vector_rctrTrain, vector_ctrTest, vector_rctrTest, bidsInd, xInd, bidsL2, xL2, bidsL2Ind, \
xL2Ind, tau, ImpInOrder, MPInOrder, impNames,listCampPerImp)
dictToRetInd[perc].append([budgetInd, cartBidsInd, cartWonInd, \
cartClickedInd, costBidsInd, revenueInd, profitInd])
dictToRetL2[perc].append([budgetL2, cartBidsL2, cartWonL2, \
cartClickedL2, costBidsL2, revenueL2, profitL2])
dictToRetL2Ind[perc].append([budgetL2Ind, cartBidsL2Ind, cartWonL2Ind, \
cartClickedL2Ind, costBidsL2Ind, revenueL2Ind, profitL2Ind])
dictToRetGr[perc].append([budgetGr, cartBidsGr, cartWonGr, cartClickedGr, \
costBidsGr, revenueGr, profitGr])
print("Took: "+str(time.time()-initTime)+' seconds')
return [dictToRetInd, dictToRetL2, dictToRetL2Ind, dictToRetGr]
## For the Pareto experiment we only need to run the L2+Indicator method for several values of \tau and a
## number of simulations. We also need to run the greedy method.
def RunSimOnlyGreedy(numCampaigns, num_impressions, num_edges, index_Imps, \
index_sizeCamps, PPFTable, numericBeta, vector_q, vector_m, vector_ctrTrain,\
vector_rctrTrain, vector_ctrTest, vector_rctrTest, ImpInOrder, MPInOrder, impNames,\
listCampPerImp, mult = 1.0):
## We first initialize the budgets used and the matrices of bids made, won, and clicked
## for the greedy method.
[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, mat_rctr_by_ImpTrain, _, _, \
_, _, _]=CreateDataForSimulation(np.zeros(num_edges), np.zeros(num_edges), \
numCampaigns, num_impressions, num_edges, index_Imps, index_sizeCamps, \
vector_q, vector_ctrTrain, vector_rctrTrain, vector_m, PPFTable, numericBeta)
[_, _, _, _, _, _, _, budgetGr, cartBidsGr, cartWonGr, \
cartClickedGr, costBidsGr, revenueGr, profitGr, mat_r_by_Imp, \
mat_ctrTest, _, _, _, _, _, _]=CreateDataForSimulation(np.zeros(num_edges), \
np.zeros(num_edges), numCampaigns, num_impressions, num_edges, index_Imps, index_sizeCamps, \
vector_q, vector_ctrTest, vector_rctrTest, vector_m, PPFTable, numericBeta)
## (the arguments above are assumed to mirror the parallel CreateDataForSimulation calls, using the test CTRs)
from pathlib import Path
from argparse import Namespace
import numpy as np
import torch
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, Resize, Normalize
from model import lpn
from model.estimator_2d import Estimator2D
from data.video_dataset import VideoDataset
from data.bbox_utils import xywh2cs, adjust_aspect_ratio
from data.data_utils import suggest_metadata
from data.person_detection import detect_person
class LPN_Estimator2D(Estimator2D):
"""2D human pose estimator using lpn-pytorch (https://github.com/zhang943/lpn-pytorch)"""
PRETRAINED_GID = '<KEY>'
CFG_FILE = 'model/configs/lpn50_256x192_gd256x2_gc.yaml'
CKPT_FILE = 'model/checkpoints/lpn_50_256x192.pth'
BATCH_SIZE = 64
def __init__(self, device='cpu'):
self.device = device
# download pretrained weights if necessary
if not Path(self.CKPT_FILE).exists():
self.download_weights()
# load pretrained lpn pose network
self.model, self.cfg = self.create_lpn_model(
Path(self.CFG_FILE).resolve(),
Path(self.CKPT_FILE).resolve())
def download_weights(self):
try:
from google_drive_downloader import GoogleDriveDownloader as gdd
gdd.download_file_from_google_drive(self.PRETRAINED_GID, self.CKPT_FILE)
except ImportError as error:
print('GoogleDriveDownloader has to be installed for automatic download. ' \
'You can download the weights manually from: https://drive.google.com/file/d/1dldLwjOacXV_uGkbxfEIPPJEK_2A-Snp/view?usp=sharing')
def create_lpn_model(self, cfg_file, ckp_file):
# create Configs
args = Namespace(cfg = cfg_file, modelDir='', logDir='')
lpn.update_config(lpn.cfg, args)
# Use cfg to create model
pose_model = lpn.get_pose_net(lpn.cfg, is_train=False)
# load pretrained weights
if torch.cuda.is_available():
pose_model = pose_model.cuda()
pose_model.load_state_dict(torch.load(ckp_file), strict=False)
else:
checkpoint = torch.load(ckp_file, map_location=torch.device(self.device))
pose_model.load_state_dict(checkpoint, strict=False)
pose_model.eval()
return pose_model, lpn.cfg
def default_transform(self):
return Compose([
Resize((256, 192)),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
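## Usage sketch (added; hypothetical -- assumes torchvision >= 0.8, where Resize and
## Normalize accept float tensors in [0, 1] of shape (C, H, W)):
# transform = estimator.default_transform()
# batch = torch.stack([transform(f) for f in frames])  # (N, 3, 256, 192) network input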
def estimate(self, video):
# person detection on every 3rd frame interpolating in between
bboxes = detect_person('yolov5s', video, pred_every=3)
# Convert bboxes to correct aspect ratio
bboxes = | np.apply_along_axis(adjust_aspect_ratio, 1, bboxes, aspect_ratio=3/4) | numpy.apply_along_axis |
'''
Sampling source images from SMOT dataset
Download: https://www.acin.tuwien.ac.at/en/vision-for-robotics/software-tools/smot/
usage: python3 data_processing/process_SMOT.py [path/to/smot] [obj_name=="obj_01/obj_02/..."]
'''
#uniformly sample object crops using a renderer
import sys,os
sys.path.append(os.path.abspath('.')) # To find local version of the library
sys.path.append(os.path.abspath('../')) # To find local version of the library
from NOL_model import NOL_network as NOLnet
import cv2
import numpy as np
import open3d as o3d
import copy
from sklearn.neighbors import NearestNeighbors
from matplotlib import pyplot as plt
from skimage.transform import resize
import h5py
import math
from skimage.transform import resize
import NOL_tools.operations as to
import shutil
import transforms3d as tf3d
import json
if(len(sys.argv)<3):
print("usage: python3 data_processing/process_SMOT.py [path/to/smot] [obj_name e.g., obj_01,obj_02,...] [icp=yes(1)/no(0),default=1]")
sys.exit()
#smot_dir ="/root/hdd_linux/SMOT_Upload" #sys.argv[1]
#t_label="obj_01"
smot_dir=sys.argv[1]
t_label=sys.argv[2]
icp=1
if(len(sys.argv)==4):
icp=int(sys.argv[3])
#Load Camera parameters of SMOT dataset
camera_intrinsic_fn = os.path.join(smot_dir,"camera_intrinsic.json")
with open(camera_intrinsic_fn, 'r') as f:
camera_intrinsic = json.load(f)
cam_K=np.array(camera_intrinsic['intrinsic_matrix']).reshape(3,3).T
fx = cam_K[0,0]
fy = cam_K[1,1]
cx = cam_K[0,2]
cy = cam_K[1,2]
im_width = camera_intrinsic['width']
im_height = camera_intrinsic['height']
depth_th = 0.05 #depth threshold for visible mask
#Read GTS
d_type='train'
if(int(t_label[-2:])<5):
#obj_01~04: sequence 1
test_dir = os.path.join(smot_dir,d_type+"/seq{}".format(1))
else:
#obj_05~08: sequence 2
test_dir = os.path.join(smot_dir,d_type+"/seq{}".format(2))
gt_fn = os.path.join(test_dir,"scene_gt.json")
with open(gt_fn, 'r') as f:
scene_gt_smot = json.load(f)
#Define renderer
renderer = NOLnet.simple_render(img_h=im_height,img_w=im_width,cam_K=cam_K)
print("Loading the 3D model")
ply_fn = os.path.join(smot_dir,"models/{}.ply".format(t_label))
mesh = o3d.io.read_triangle_mesh(ply_fn)
vertices =np.array(mesh.vertices)
faces = np.array(mesh.triangles)
print("Computing visible verticies in each image")
n_scenes= len(scene_gt_smot.keys())
img_cnt=0
poses=[]
visible_verts=[]
v_ids=[]
scores=[] #visibility score to select the best image (with less occlusion) at the beginning
for s_id in sorted(scene_gt_smot.keys()):
scene_id = int(s_id)
gts= scene_gt_smot[str(scene_id)]
rgb_path = test_dir+"/color/{:06d}.png".format(scene_id)
depth_path = test_dir+"/depth/{:06d}.png".format(scene_id)
#img = cv2.imread(rgb_path)[:,:,::-1] #rgb is not necessary this time
depth = cv2.imread(depth_path,cv2.CV_16UC1)/1000
has_obj=False
for gt in gts:
obj_id = gt['obj_id']
if(int(obj_id)==int(t_label[-2:])):
tf_mat = np.eye(4)
tf_mat[:3,:3] =np.array(gt['cam_R_m2c']).reshape(3,3)
tf_mat[:3,3] =np.array(gt['cam_t_m2c'])
has_obj=True
break
if(has_obj):
simple_xyz = renderer.predict([np.array([vertices]),np.array([faces]),
np.array([tf_mat])])[0]
depth_r = simple_xyz[:,:,3]
mask = np.logical_and(np.abs(depth_r-depth)<depth_th,depth_r>0)
valid_vertices = to.get_valid_vertices(vertices,faces,depth_r,tf_mat,cam_K,im_width,im_height,
mask=mask)
n_full = np.sum(depth_r>0)
n_visible = np.sum(mask)
visratio=n_visible/n_full
scores.append(visratio)
poses.append(tf_mat)
v_ids.append(s_id)
visible_verts.append(valid_vertices)
if(len(v_ids)%10==0):
print("processing: {:04d}/{:04d}".format(len(v_ids),n_scenes))
print("Finished:Computing visible verticies")
print("Iteratively adding new source images...")
scores=np.array(scores)
sorted_idx = np.argsort(scores).tolist()
th_trans = 0.3
th_rot = 45
selected_frames=[]
visible_v =np.copy(np.array(visible_verts))
sum_visible = np.zeros((visible_v.shape[1]))
data_idx=np.arange(scores.shape[0])
max_n=15 #limit maximum number of images
while visible_v.shape[0]>0:
active_idx = np.ones((visible_v.shape[0]),bool)
after_visible = sum_visible+visible_v
vert_score = np.sum(np.tanh(1.5*after_visible),axis=1)
idx =np.argmax(vert_score)
active_idx[idx]=0
before = np.sum(np.tanh(1.5*sum_visible))
sum_visible+= visible_v[idx]
after = np.sum(np.tanh(1.5*sum_visible))
print("Increased score for the observed vertices:", after-before)
if(after-before<1): #terminate the improvement is less than 1
break
d_idx = data_idx[idx]
score = scores[d_idx]
pose_q = poses[d_idx]
v_id = v_ids[d_idx]
selected_frames.append(v_id)
if(len(selected_frames)>max_n):
break
for del_id,idx_c in enumerate(data_idx):
pose_c = poses[idx_c]
tra_diff = np.linalg.norm(pose_c[:3,3]-pose_q[:3,3])
if(tra_diff<0.1):
rot_diff = np.abs(np.degrees(np.array(tf3d.euler.mat2euler(np.matmul( np.linalg.inv(pose_c[:3,:3]),pose_q[:3,:3])))))
if(rot_diff[0]<15 and rot_diff[1]<15 and rot_diff[2]<15 ): #consider the flipped hand
active_idx[del_id]=0
data_idx = data_idx[active_idx]
visible_v = visible_v[active_idx]
print("No. selected frames:",len(selected_frames))
print("Selected frames:",selected_frames)
print("Save selected frames to the NOL source format (.hdf5)")
if(icp==1):
print("ICP is enabled")
else:
print("[INFO] It is possible to perform ICP to have better poses using depth images, but it sometimes produces worse poses")
input_imgs=[]
poses_=[]
masks=[]
bboxes=[]
source_imgs=[]
for s_id in selected_frames:
scene_id = int(s_id)
gts= scene_gt_smot[str(scene_id)]
rgb_path = test_dir+"/color/{:06d}.png".format(scene_id)
depth_path = test_dir+"/depth/{:06d}.png".format(scene_id)
img = cv2.imread(rgb_path)[:,:,::-1] #BGR -> RGB
depth = cv2.imread(depth_path,cv2.CV_16UC1)/1000
has_obj=False
for gt in gts:
obj_id = gt['obj_id']
if(int(obj_id)==int(t_label[-2:])):
tf_mat = np.eye(4)
tf_mat[:3,:3] =np.array(gt['cam_R_m2c']).reshape(3,3)
tf_mat[:3,3] =np.array(gt['cam_t_m2c'])
has_obj=True
break
# render depth with the ground-truth pose (before optional ICP refinement)
simple_xyz = renderer.predict([np.array([vertices]),np.array([faces]),
np.array([tf_mat])])[0]
depth_r = simple_xyz[:,:,3]
mask = np.logical_and(np.abs(depth_r-depth)<depth_th,depth_r>0)
if(icp==1):
points_src = np.zeros((im_height,im_width,6),np.float32)
points_src[:,:,:3] = to.getXYZ(depth_r,cam_K[0,0],cam_K[1,1],cam_K[0,2],cam_K[1,2])
points_src[:,:,3:],_ = to.get_normal(depth_r,fx=cam_K[0,0],fy=cam_K[1,1],cx=cam_K[0,2],cy=cam_K[1,2])
points_src = points_src[mask]
points_tgt = np.zeros((depth.shape[0],depth.shape[1],6),np.float32)
points_tgt[:,:,:3] = to.getXYZ(depth,fx=cam_K[0,0],fy=cam_K[1,1],cx=cam_K[0,2],cy=cam_K[1,2])
points_tgt[:,:,3:],_ = to.get_normal(depth,fx=cam_K[0,0],fy=cam_K[1,1],cx=cam_K[0,2],cy=cam_K[1,2])
pts_tgt = points_tgt[mask]
icp_fnc = cv2.ppf_match_3d_ICP(100,tolerence=0.05,numLevels=4) #1cm
retval, residual, pose=icp_fnc.registerModelToScene(points_src.reshape(-1,6), pts_tgt.reshape(-1,6))
tf_mat = np.matmul(pose,tf_mat)
simple_xyz = renderer.predict([np.array([vertices]),np.array([faces]),
np.array([tf_mat])])[0]
depth_r = simple_xyz[:,:,3]
mask = np.logical_and(np.abs(depth_r-depth)<depth_th,depth_r>0)
#compute valid bbox and resize a cropped image and mask to 256,256
img_masked = np.copy(img)
img_masked = img_masked/255
vu_list = np.where(mask)
bbox = np.array([np.min(vu_list[0]),np.min(vu_list[1]),np.max(vu_list[0]),np.max(vu_list[1])],np.int32)
height = bbox[2]-bbox[0]
width = bbox[3]-bbox[1]
ct_v = int((bbox[2]+bbox[0])*0.5)
ct_u = int((bbox[3]+bbox[1])*0.5)
length = int(max(max(height*0.5,width*0.5),128))
bbox_new = np.array([max(ct_v-length,0),max(ct_u-length,0),min(ct_v+length,im_height),min(ct_u+length,im_width)])
img_crop = img_masked[bbox_new[0]:bbox_new[2],bbox_new[1]:bbox_new[3]]
mask_crop = mask[bbox_new[0]:bbox_new[2],bbox_new[1]:bbox_new[3]]
img_crop = resize(img_crop,(256,256))
mask_crop = np.expand_dims(resize(mask_crop,(256,256))>0.5,axis=2)
input_imgs.append(img_crop)
poses_.append(tf_mat)
masks.append(mask_crop)
bboxes.append(bbox_new)
test_fn = "./sample_data/smot/"+t_label+".hdf5"
if not(os.path.exists("./sample_data/smot/")):os.makedirs("./sample_data/smot/")
train_data = h5py.File(test_fn, "w")
train_data.create_dataset("vertices_3d",data=vertices)
train_data.create_dataset("faces",data=np.array(faces))
train_data.create_dataset("images",data=np.array(input_imgs))
train_data.create_dataset("poses",data= | np.array(poses_) | numpy.array |
""" Hand Tracker on Depth Image based on https://www.learnopencv.com/object-tracking-using-opencv-cpp-python/
"""
import os
import PIL.Image
import glob
import numpy as np
from matplotlib import pyplot as plt
import cv2
import json
import configparser
import csv
DATASET_BASE_DIR_NAME = r"D:\git\HandPointer\dataset"
def get_local_minima(img_d, img_c):
img_d[200:, :] = 10000
scale = 1/8
confidence_thrshold = 100
morph_kernel = np.ones((9, 9), np.uint8)
h, w = img_d.shape[:2]
sh = int(h*scale)
sw = int(w*scale)
imgd_scaled = cv2.resize(img_d, (sw, sh))  # cv2.resize expects (width, height)
imgc_scaled = cv2.resize(img_c, (sw, sh))
mask = imgc_scaled > confidence_thrshold
fimgd = cv2.morphologyEx(imgd_scaled, cv2.MORPH_BLACKHAT, morph_kernel)
fimg = np.multiply(fimgd, mask.astype(np.uint8))
inv_mask = np.invert(mask)
imgd_scaled[inv_mask] = 10000
# imgd_scaled = np.multiply(imgd_scaled, mask.astype(np.uint8))
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(imgd_scaled, mask.astype(np.uint8))
imgd_scaled = imgd_scaled-400
cimg = (imgd_scaled.clip(min=0, max=600)/5).astype(np.uint8)
cimg = cv2.cvtColor(cimg, cv2.COLOR_GRAY2BGR)
cimg = cv2.drawMarker(cimg, min_loc, (0, 0, 0))
cimg = cv2.resize(cimg, (500, 500))
cv2.imshow("dpeth", cimg)
cv2.waitKey(1)
# print(min_loc, min_val)
# ax1 = plt.subplot(121)
# plt.imshow(mask)
# plt.subplot(122, sharex=ax1, sharey=ax1)
# plt.imshow(cimg), plt.title("after top hat")
# plt.show()
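## Illustrative note (added): MORPH_BLACKHAT computes closing(img) - img, so it highlights
## pixels darker (i.e. closer, in a depth map) than their neighborhood -- a cheap
## local-minimum detector. Toy example:
# toy = np.full((9, 9), 100, np.uint8); toy[4, 4] = 40          # one near pixel in a flat patch
# bh = cv2.morphologyEx(toy, cv2.MORPH_BLACKHAT, np.ones((5, 5), np.uint8))
# bh[4, 4]  # -> 60: the depth dip stands out after the transform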
def check_velocity():
# json_file_name = os.path.join(DATASET_BASE_DIR_NAME, "result.json")
# if os.path.isfile(json_file_name):
# with open(json_file_name, "r", encoding='utf8') as fid:
# datasets_info = json.load(fid)
# dataset = datasets_info[id]
    csv_file_names = ['D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds325#fast_circles.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds325#gestures_two_hands.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds325#gestures_two_hands_swap.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds325#sequence_closed_hand.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds325#sequence_open_hand.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds325#sequence_small_shapes.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds536#circle_ccw.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds536#circle_ccw_far.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds536#circle_ccw_hand.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds536#circle_sequence.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds536#multiple_shapes_1.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds536#rectangle_ccw.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds536#rectangle_cw.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds536#star.csv',
'D:\git\HandPointer\dataset\#git#ajay#DepthSensing#dataset#ds536#zigzag.csv',
'D:\git\HandPointer\dataset\#git#HandPointer#dataset#ds325#fast_circles.csv']
csv_file_name = os.path.join(r"D:\git\HandPointer\dataset", "#git#ajay#DepthSensing#dataset#ds325#fast_circles.csv")
csv_file_name = csv_file_names[13]
trajectory_data = np.zeros((800, 3))
with open(csv_file_name, "r") as fid:
reader = csv.reader(fid)
header = next(reader)
for row in reader:
if len(row)<4:
continue
file_id = int(row[0])
x = int(row[1])
y = int(row[2])
d = int(row[3])
trajectory_data[file_id,0] = x
trajectory_data[file_id,1] = y
trajectory_data[file_id,2] = d
velocity = np.zeros((800, 2))
step = 5
velocity[step:,0] = trajectory_data[step:,0] - trajectory_data[:-step,0]
velocity[step:,1] = trajectory_data[step:,1] - trajectory_data[:-step,1]
velocity_norm = np.linalg.norm(velocity, axis=1)
stop = (velocity_norm < 5).astype(np.uint8)
plt.subplot(311)
plt.plot(trajectory_data[:,0],'b')
plt.plot(trajectory_data[:,1],'r')
plt.plot(stop*100, 'g'), plt.title("xy")
plt.subplot(312), plt.plot(velocity[:,0],'b')
plt.plot(velocity[:,1],'r'), plt.title("velocity xy")
plt.subplot(313), plt.plot(velocity_norm,'b'), plt.title("velocity norm")
plt.plot(stop*100, 'g')
plt.show()
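## Illustrative note (added): the stop detector above is a lagged finite difference,
## v[t] = p[t] - p[t-step], thresholded on its Euclidean norm. Minimal sketch:
# p = np.cumsum(np.random.randn(100, 2), axis=0)   # toy 2D trajectory (hypothetical)
# v = p[5:] - p[:-5]
# stopped = np.linalg.norm(v, axis=1) < 5          # True where the hand barely moves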
def get_datasets():
datasets = [
r"ds325\fast_circles",
r"ds325\gestures_two_hands",
r"ds325\gestures_two_hands_swap",
r"ds325\sequence_closed_hand",
r"ds325\sequence_open_hand",
r"ds325\sequence_small_shapes",
r"ds536\circle_ccw",
r"ds536\circle_ccw_far",
r"ds536\circle_ccw_hand",
r"ds536\circle_sequence",
r"ds536\multiple_shapes_1",
r"ds536\rectangle_ccw",
r"ds536\rectangle_cw",
r"ds536\star",
r"ds536\zigzag",
]
datasets = [os.path.join(DATASET_BASE_DIR_NAME, dataset) for dataset in datasets]
return datasets
datasets_info = [{
"base_dir_name" : r"D:\git\HandPointer\dataset\ds325\gestures_two_hands_swap",
"max_file_count" : 600,
"init_frame_id" : 50
}]
def calc_tajectory(file_id, loc, img_d, img_c):
# x = int(bbox[0] + bbox[2])//2
# y = int(bbox[1] + bbox[3])//2
x = int(loc[0])
y = int(loc[1])
depth = img_d[y,x]
confidence = img_c[y, x]
trajectory = {
"file_id" : file_id,
"finger_tip" : {
"x": x,
"y": y,
},
"depth": depth,
"confidence": confidence
}
return trajectory
def create_video_from_results():
dataset_dir_names = get_datasets()
for dataset_dir_name in dataset_dir_names:
camera_file_name = os.path.join(os.path.dirname(dataset_dir_name), "camera_parameters.txt")
mtx, dist, newcameramtx = read_camera_parameter(camera_file_name)
video_file_name = dataset_dir_name.replace(DATASET_BASE_DIR_NAME,"")[1:]
video_file_name = video_file_name.replace("\\", "_") + "_result.avi"
video_file_name = os.path.join(DATASET_BASE_DIR_NAME, video_file_name)
out = cv2.VideoWriter(video_file_name, cv2.VideoWriter_fourcc(*'DIVX'), 60, (320, 240))
file_names = glob.glob(os.path.join(dataset_dir_name, "*_result.png"), recursive=True)
for file_name in file_names:
if not os.path.isfile(file_name):
continue
img = np.array(PIL.Image.open(file_name))
img = img[:,:,::-1]
img = cv2.undistort(img, mtx, dist, None, newcameramtx)
out.write(img)
out.release()
def create_video():
dataset_dir_names = get_datasets()
for dataset_dir_name in dataset_dir_names:
camera_file_name = os.path.join(os.path.dirname(dataset_dir_name), "camera_parameters.txt")
mtx, dist, newcameramtx = read_camera_parameter(camera_file_name)
video_file_name = dataset_dir_name.replace(DATASET_BASE_DIR_NAME,"")[1:]
video_file_name = video_file_name.replace("\\", "_") + "_depth.avi"
video_file_name = os.path.join(DATASET_BASE_DIR_NAME, video_file_name)
out = cv2.VideoWriter(video_file_name, cv2.VideoWriter_fourcc(*'DIVX'), 60, (320, 240))
file_names = glob.glob(os.path.join(dataset_dir_name, "*_depth.tiff"), recursive=True)
for file_name in file_names:
confidence_file_name = file_name.replace("depth", "confidence")
if not os.path.isfile(file_name) or not os.path.isfile(confidence_file_name):
continue
img_d = np.array(PIL.Image.open(file_name)).astype(np.float)*0.1
img_c = np.array(PIL.Image.open(confidence_file_name)).astype(np.float)*0.1
img_d = np.clip(img_d, 0, 255).astype(np.uint8)
img_c = np.clip(img_c, 0, 255).astype(np.uint8)
img_d = cv2.undistort(img_d, mtx, dist, None, newcameramtx)
img_c = cv2.undistort(img_c, mtx, dist, None, newcameramtx)
img_out = np.zeros((*img_d.shape, 3), dtype=np.uint8)
import numpy as np
import math
import scipy
from fractions import Fraction
import itertools
import biotuner
from biotuner.biotuner_utils import *
import matplotlib.pyplot as plt
from numpy import array, zeros, ones, arange, log2, sqrt, diff, concatenate
import pytuning
from math import gcd
from scipy.stats import norm
from scipy.signal import argrelextrema, detrend
import scipy.signal as ss
from pytuning import create_euler_fokker_scale
from collections import Counter
from functools import reduce
from pytuning.utilities import normalize_interval
from pactools import Comodulogram, REFERENCES
'''---------------------------------------------------------Extended peaks-------------------------------------------------------------'''
'''EXTENDED PEAKS from expansions
'''
def EEG_harmonics_mult(peaks, n_harmonics, n_oct_up = 0):
"""
Natural harmonics
This function takes a list of frequency peaks as input and computes the desired number of harmonics
with the formula: x, 2x, 3x ..., nx
Parameters
----------
peaks: List (float)
Peaks represent local maximum in a spectrum
n_harmonics: int
Number of harmonics to compute
n_oct_up: int
Defaults to 0. Corresponds to the number of octave the peaks are shifted
Returns
-------
multi_harmonics: array
(n_peaks, n_harmonics + 1)
"""
n_harmonics = n_harmonics + 2
multi_harmonics = []
for p in peaks:
harmonics = []
p = p * (2**n_oct_up)
i = 1
harm_temp = p
while i < n_harmonics:
harm_temp = p * i
harmonics.append(harm_temp)
i+=1
multi_harmonics.append(harmonics)
multi_harmonics = np.array(multi_harmonics)
return multi_harmonics
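## Minimal usage sketch (added; values are arbitrary):
# >>> EEG_harmonics_mult([10.0], n_harmonics=3)
# array([[10., 20., 30., 40.]])   # shape (n_peaks, n_harmonics + 1)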
def EEG_harmonics_div(peaks, n_harmonics, n_oct_up = 0, mode = 'div'):
"""
Natural sub-harmonics
This function takes a list of frequency peaks as input and computes the desired number of harmonics
using division:
Parameters
----------
peaks: List (float)
Peaks represent local maximum in a spectrum
n_harmonics: int
Number of harmonics to compute
n_oct_up: int
Defaults to 0. Corresponds to the number of octave the peaks are shifted
mode: str
Defaults to 'div'.
'div': x, x/2, x/3 ..., x/n
'div_add': x, (x+x/2), (x+x/3), ... (x+x/n)
'div_sub': x, (x-x/2), (x-x/3), ... (x-x/n)
Returns
-------
div_harmonics: array
(n_peaks, n_harmonics + 1)
div_harmonics_bounded: array
(n_peaks, n_harmonics + 1)
"""
n_harmonics = n_harmonics + 2
div_harmonics = []
for p in peaks:
harmonics = []
p = p * (2**n_oct_up)
i = 1
harm_temp = p
while i < n_harmonics:
if mode == 'div':
harm_temp = (p/i)
if mode == 'div_add':
harm_temp = p + (p/i)
if mode == 'div_sub':
harm_temp = p - (p/i)
harmonics.append(harm_temp)
i+=1
div_harmonics.append(harmonics)
div_harmonics = np.array(div_harmonics)
div_harmonics_bounded = div_harmonics.copy()
#Rebound the result between 1 and 2
for i in range(len(div_harmonics_bounded)):
for j in range(len(div_harmonics_bounded[i])):
div_harmonics_bounded[i][j] = rebound(div_harmonics_bounded[i][j])
return div_harmonics, div_harmonics_bounded
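## Minimal usage sketch (added; rebound, imported from biotuner_utils, is assumed to fold
## values into the octave [1, 2) by repeated doubling/halving):
# >>> div, div_bounded = EEG_harmonics_div([10.0], 2)
# >>> div
# array([[10.        ,  5.        ,  3.33333333]])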
def harmonic_fit(peaks, n_harm = 10, bounds = 1, function = 'mult', div_mode = 'div', n_common_harms = 5):
"""
This function computes harmonics of a list of peaks and compares the lists of harmonics pairwise to find fitting
between the harmonic series
Parameters
----------
peaks: List (float)
Peaks represent local maximum in a spectrum
n_harm: int
Number of harmonics to compute
bounds: int
Minimum distance (in Hz) between two frequencies to consider a fit
function: str
Defaults to 'mult'.
'mult' will use natural harmonics
'div' will use natural sub-harmonics
div_mode: str
Defaults to 'div'. See EEG_harmonics_div function.
n_common_harms: int
Number of most common harmonic positions to return.
Returns
-------
harm_fit: List (float)
harmonic values shared across at least two of the peaks' harmonic series (within bounds)
harm_list1: List
per-pair matching entries from the first harmonic series (as returned by compareLists)
harm_list2: List
per-pair matching entries from the second harmonic series (as returned by compareLists)
harmonics: List
sorted set of harmonic positions at which fits occurred
most_common_harmonics: List
up to n_common_harms harmonic positions that produced fits most often (count > 1)
"""
from itertools import combinations
peak_bands = []
for i in range(len(peaks)):
peak_bands.append(i)
if function == 'mult':
multi_harmonics = EEG_harmonics_mult(peaks, n_harm)
elif function == 'div':
multi_harmonics, x = EEG_harmonics_div(peaks, n_harm, mode = div_mode)
elif function == 'exp':
multi_harmonics = []
for h in range(n_harm+1):
h += 1
multi_harmonics.append([i**h for i in peaks])
multi_harmonics = np.array(multi_harmonics)
multi_harmonics = np.moveaxis(multi_harmonics, 0, 1)
#print(np.array(multi_harmonics).shape)
list_peaks = list(combinations(peak_bands,2))
#print(list_peaks)
harm_temp = []
harm_list1 = []
harm_list2 = []
harm_list = []
harmonics = []
for i in range(len(list_peaks)):
harms, _, _, d, e, harm_list = compareLists(multi_harmonics[list_peaks[i][0]], multi_harmonics[list_peaks[i][1]], bounds)
harm_temp.append(harms)
harm_list1.append(d)
harm_list2.append(e)
harmonics.append(harm_list)
harm_fit = np.array(harm_temp).squeeze()
harmonics = reduce(lambda x, y: x+y, harmonics)
most_common_harmonics= [h for h, h_count in Counter(harmonics).most_common(n_common_harms) if h_count > 1]
harmonics = list(np.sort(list(set(harmonics))))
if len(peak_bands) > 2:
harm_fit = list(itertools.chain.from_iterable(harm_fit))
harm_fit = [round(num, 3) for num in harm_fit]
harm_fit = list(set(harm_fit))
return harm_fit, harm_list1, harm_list2, harmonics, most_common_harmonics
'''EXTENDED PEAKS from restrictions
'''
def consonance_peaks (peaks, limit):
"""
This function computes consonance (for a given ratio a/b, when a < 2b, consonance corresponds to (a+b)/(a*b)) between peaks
peaks: List (float)
Peaks represent local maximum in a spectrum
limit: float
minimum consonance value to keep associated pairs of peaks
Comparisons with familiar ratios:
Unison-frequency ratio 1:1 yields a value of 2
Octave-frequency ratio 2:1 yields a value of 1.5
Perfect 5th-frequency ratio 3:2 yields a value of 0.833
Perfect 4th-frequency ratio 4:3 yields a value of 0.583
Major 6th-frequency ratio 5:3 yields a value of 0.533
Major 3rd-frequency ratio 5:4 yields a value of 0.45
Minor 3rd-frequency ratio 5:6 yields a value of 0.366
Minor 6th-frequency ratio 5:8 yields a value of 0.325
Major 2nd-frequency ratio 8:9 yields a value of 0.236
Major 7th-frequency ratio 8:15 yields a value of 0.192
Minor 7th-frequency ratio 9:16 yields a value of 0.174
Minor 2nd-frequency ratio 15:16 yields a value of 0.129
Returns
-------
consonance: List (float)
consonance scores for each pairs of consonant peaks
cons_pairs: List of lists (float)
list of lists of each pairs of consonant peaks
cons_peaks: List (float)
list of consonant peaks (no doublons)
cons_tot: float
averaged consonance value for each pairs of peaks
"""
from fractions import Fraction
consonance_ = []
peaks2keep = []
peaks_consonance = []
cons_tot = []
for p1 in peaks:
for p2 in peaks:
peaks2keep_temp = []
p2x = p2
p1x = p1
if p1x > p2x:
while p1x > p2x:
p1x = p1x/2
if p1x < p2x:
while p2x > p1x:
p2x = p2x/2
if p1x < 0.1:
p1x = 0.06
if p2x < 0.1:
p2x = 0.06 #random number to avoid division by 0
ratio = Fraction(p2x/p1x).limit_denominator(1000)
cons_ = (ratio.numerator + ratio.denominator)/(ratio.numerator * ratio.denominator)
if cons_ < 1 :
cons_tot.append(cons_)
if cons_ > 1 or cons_ < limit:
cons_ = None
p2x = None
p1x = None
if p2x != None:
peaks2keep_temp.extend([p2, p1])
consonance_.append(cons_)
peaks2keep.append(peaks2keep_temp)
#cons_pairs = np.array(peaks2keep)
cons_pairs = [x for x in peaks2keep if x]
#consonance = np.array(consonance_)
consonance = [i for i in consonance_ if i]
cons_peaks = list(itertools.chain(*cons_pairs))
cons_peaks = [np.round(c, 2) for c in cons_peaks]
cons_peaks = list(set(cons_peaks))
#consonance = list(set(consonance))
return consonance, cons_pairs, cons_peaks, np.average(cons_tot)
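# Example (sketch): for peaks 10 and 15 Hz, the octave reduction gives
# 7.5/10 = 3/4, so the pair's consonance is (3+4)/(3*4) ~= 0.583 and it is
# kept with limit=0.5, while the 1:1 self-pairs (value 2) are discarded:
# >>> cons, pairs, cons_peaks, avg = consonance_peaks([10, 15], 0.5)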
def multi_consonance(cons_pairs, n_freqs = 5):
"""
Function that keeps the frequencies that are the most consonant with others
Takes pairs of frequencies that are consonant (output of the 'compute consonance' function)
cons_pairs: List of lists (float)
list of lists of each pairs of consonant peaks
n_freqs: int
maximum number of consonant freqs to keep
Returns
-------
freqs_related: List (float)
peaks that are consonant with at least two other peaks, starting with the peak that is
consonant with the maximum number of other peaks
"""
freqs_dup = list(itertools.chain(*cons_pairs))
pairs_temp = list(itertools.chain.from_iterable(cons_pairs))
freqs_nodup = list(dict.fromkeys(pairs_temp))
f_count = []
for f in freqs_nodup:
f_count.append(freqs_dup.count(f))
freqs_related = [x for _,x in sorted(zip(f_count,freqs_nodup))][-(n_freqs):][::-1]
return freqs_related
def consonant_ratios (peaks, limit, sub = False, input_type = 'peaks', metric = 'cons'):
"""
Function that computes integer ratios from peaks with higher consonance
Needs at least two pairs of values
peaks: List (float)
Peaks represent local maximum in a spectrum
limit: float
minimum consonance value to keep associated pairs of peaks
sub: boolean
Defaults to False
When set to True, include ratios a/b when a < b.
Returns
-------
cons_ratios: List (float)
list of consonant ratios
consonance: List (float)
list of associated consonance values
"""
from fractions import Fraction
consonance_ = []
ratios2keep = []
if input_type == 'peaks':
ratios = compute_peak_ratios(peaks, sub = sub)
if input_type == 'ratios':
ratios = peaks
for ratio in ratios:
frac = Fraction(ratio).limit_denominator(1000)
if metric == 'cons':
cons_ = (frac.numerator + frac.denominator)/(frac.numerator * frac.denominator)
if metric == 'harmsim':
cons_ = dyad_similarity(ratio)
if cons_ > limit :
consonance_.append(cons_)
ratios2keep.append(ratio)
#print(ratios2keep)
ratios2keep = np.array(np.round(ratios2keep, 3))
cons_ratios = np.sort(list(set(ratios2keep)))
#cons_ratios = np.array(ratios2keep)
#ratios = []
#ratios = [ratios.append(x) for x in ratios2keep if x not in ratios]
consonance = np.array(consonance_)
consonance = [i for i in consonance if i]
return cons_ratios, consonance
def timepoint_consonance (data, method = 'cons', limit = 0.2, min_notes = 3):
"""
Function that keeps moments of consonance from multiple time series of peak frequencies
data: List of lists (float)
Axis 0 represents moments in time
Axis 1 represents the sets of frequencies
method: str
Defaults to 'cons'
'cons' will compute pairwise consonance between frequency peaks in the form of (a+b)/(a*b)
'euler' will compute Euler's gradus suavitatis
limit: float
limit of consonance under which the set of frequencies are not retained
When method = 'cons'
--> See consonance_peaks method's doc to refer consonance values to common intervals
When method = 'euler'
--> Major (4:5:6) = 9
Minor (10:12:15) = 9
Major 7th (8:10:12:15) = 10
Minor 7th (10:12:15:18) = 11
Diminish (20:24:29) = 38
min_notes: int
minimum number of consonant frequencies in the chords. Only relevant when method is set to 'cons'.
Returns
-------
chords: List of lists (float)
Axis 0 represents moments in time
Axis 1 represents the sets of consonant frequencies
positions: List (int)
positions on Axis 0
"""
data = np.moveaxis(data, 0, 1)
#print('NAN', np.argwhere(np.isnan(data)))
out = []
positions = []
for count, peaks in enumerate(data):
peaks = [x for x in peaks if x >= 0]
if method == 'cons':
cons, b, peaks_cons, d = consonance_peaks(peaks, limit)
#print(peaks_cons)
out.append(peaks_cons)
if len(list(set(peaks_cons))) >= min_notes:
positions.append(count)
if method == 'euler':
peaks_ = [int(np.round(p, 2)*100) for p in peaks]
#print(peaks_)
eul = euler(*peaks_)
#print(eul)
if eul < limit:
out.append(list(peaks))
positions.append(count)
out = [x for x in out if x != []]
#if method == 'cons':
out = list(out for out,_ in itertools.groupby(out))
chords = [x for x in out if len(x)>=min_notes]
return chords, positions
'''
################################################## PEAKS METRICS ############################################################
'''
#Consonance#
#Input: peaks
def consonance (ratio, limit = 1000):
''' Compute metric of consonance from a single ratio of frequency
ratio: float
limit: int
Defaults to 1000
Maximum value of the denominator of the fraction representing the ratio
'''
ratio = Fraction(float(ratio)).limit_denominator(limit)
cons = (ratio.numerator + ratio.denominator)/(ratio.numerator * ratio.denominator)
return cons
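# Worked example: a perfect fifth.
# >>> consonance(3/2)   # Fraction(1.5) -> 3/2, (3+2)/(3*2) = 5/6
# 0.8333333333333334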
def euler(*numbers):
"""
Euler's "gradus suavitatis" (degree of sweetness) function
Return the "degree of sweetness" of a musical interval or chord expressed
as a ratio of frequencies a:b:c, according to Euler's formula
Greater values indicate more dissonance
numbers: List (int)
frequencies
"""
factors = prime_factors(lcm(*reduced_form(*numbers)))
return 1 + sum(p - 1 for p in factors)
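# Worked example, matching the values quoted in timepoint_consonance's doc:
# a major triad 4:5:6 has lcm(4, 5, 6) = 60 = 2*2*3*5, so the gradus is
# 1 + (1 + 1 + 2 + 4) = 9.
# >>> euler(4, 5, 6)
# 9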
#Input: peaks
def tenneyHeight(peaks, avg = True):
"""
Tenney Height is a measure of inharmonicity calculated on two frequencies (a/b) reduced in their simplest form.
It can also be called the log product complexity of a given interval.
peaks: List (float)
frequencies
avg: Boolean
Default to True
When set to True, all tenney heights are averaged
"""
pairs = getPairs(peaks)
tenney = []
for p in pairs:
try:
frac = Fraction(p[0]/p[1]).limit_denominator(1000)
except ZeroDivisionError:
p[1] = 0.01
frac = Fraction(p[0]/p[1]).limit_denominator(1000)
x = frac.numerator
y = frac.denominator
tenney.append(log2(x*y))
if avg == True:
tenney = np.average(tenney)
return tenney
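# Worked example (assuming getPairs yields each unordered pair once): the only
# pair of [3, 2] reduces to the fraction 3/2, so the Tenney height is
# log2(3*2) = log2(6) ~= 2.585.
# >>> tenneyHeight([3, 2])
# 2.584962500721156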
def peaks_to_metrics (peaks, n_harm = 10):
'''
This function computes different metrics on peak frequencies.
peaks: List (float)
Peaks represent local maximum in a spectrum
n_harm: int
Number of harmonics to compute for 'harm_fit' metric
Returns
-------
metrics: dict (float)
Dictionary of values associated to metrics names
metrics_list: List (float)
list of peaks metrics values in the order: 'cons', 'euler', 'tenney', 'harm_fit'
'''
peaks = list(peaks)
metrics = {'cons' : 0, 'euler' : 0, 'tenney': 0, 'harm_fit': 0}
harm_fit, harm_pos1, harm_pos2, _, _ = harmonic_fit(peaks, n_harm = n_harm)
metrics['harm_pos1'] = harm_pos1
metrics['harm_pos2'] = harm_pos2
metrics['harm_fit'] = len(harm_fit)
a, b, c, metrics['cons'] = consonance_peaks (peaks, 0.1)
peaks_highfreq = [int(p*1000) for p in peaks]
metrics['euler'] = euler(*peaks_highfreq)
metrics['tenney'] = tenneyHeight(peaks_highfreq)
metrics_list = []
for value in metrics.values():
metrics_list.append(value)
return metrics, metrics_list
def metric_denom(ratio):
'''Function that computes the denominator of the normalized ratio
ratio: float
'''
ratio = sp.Rational(ratio).limit_denominator(10000)
normalized_degree = normalize_interval(ratio)
y = int(sp.fraction(normalized_degree)[1])
return y
'''SCALE METRICS'''
'''Metric of harmonic similarity represents the degree of similarity between a scale and the natural harmonic series ###
Implemented from Gill and Purves (2009)'''
def dyad_similarity(ratio):
'''
This function computes the similarity between a dyad of frequencies and the natural harmonic series
ratio: float
frequency ratio
'''
frac = Fraction(float(ratio)).limit_denominator(1000)
x = frac.numerator
y = frac.denominator
z = ((x+y-1)/(x*y))*100
return z
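# Worked example (Gill & Purves 2009): for the fifth 3/2,
# ((3 + 2 - 1) / (3 * 2)) * 100 = 66.67 % harmonic similarity.
# >>> dyad_similarity(3/2)
# 66.66666666666666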
#Input: ratios (list of floats)
def ratios2harmsim (ratios):
'''
This function computes the similarity for each ratio of a list
ratios: List (float)
list of frequency ratios (forming a scale)
Returns
---------
similarity: List (float)
list of percentage of similarity for each ratios
'''
fracs = []
for r in ratios:
fracs.append(Fraction(r).limit_denominator(1000))
sims = []
for f in fracs:
sims.append(dyad_similarity(f.numerator/f.denominator))
similarity = np.array(sims)
return similarity
def scale_cons_matrix (scale, function):
'''
This function gives a metric of a scale corresponding to the averaged metric for each pairs of ratios (matrix)
scale: List (float)
function: function
possible functions: dyad_similarity
consonance
metric_denom
'''
metric_values = []
mode_values = []
for index1 in range(len(scale)):
for index2 in range(len(scale)):
if scale[index1] > scale[index2]: #do not include the diagonal in the computation of the avg. consonance
entry = scale[index1]/scale[index2]
mode_values.append([scale[index1], scale[index2]])
metric_values.append(function(entry))
return np.average(metric_values)
def PyTuning_metrics(scale, maxdenom):
'''
This function computes the scale metrics of the PyTuning library (https://pytuning.readthedocs.io/en/0.7.2/metrics.html)
Smaller values are more consonant
scale: List (float)
List of ratios corresponding to scale steps
maxdenom: int
Maximum value of the denominator for each step's fraction
'''
scale_frac, num, denom = scale2frac(scale, maxdenom)
metrics = pytuning.metrics.all_metrics(scale_frac)
sum_p_q = metrics['sum_p_q']
sum_distinct_intervals = metrics['sum_distinct_intervals']
metric_3 = metrics['metric_3']
sum_p_q_for_all_intervals = metrics['sum_p_q_for_all_intervals']
sum_q_for_all_intervals = metrics['sum_q_for_all_intervals']
return sum_p_q, sum_distinct_intervals, metric_3, sum_p_q_for_all_intervals, sum_q_for_all_intervals
def scale_to_metrics(scale):
'''
This function computes the scale metrics of the PyTuning library and other scale metrics
scale: List (float)
List of ratios corresponding to scale steps
Returns
----------
scale_metrics: dictionary
keys correspond to metrics names
scale_metrics_list: List (float)
List of values corresponding to all computed metrics (in the same order as dictionary)
'''
scale_frac, num, denom = scale2frac(scale, maxdenom=1000)
scale_metrics = pytuning.metrics.all_metrics(scale_frac)
scale_metrics['harm_sim'] = np.round(np.average(ratios2harmsim(scale)), 2)
scale_metrics['matrix_harm_sim'] = scale_cons_matrix(scale, dyad_similarity)
scale_metrics['matrix_cons'] = scale_cons_matrix(scale, consonance)
scale_metrics_list = []
for value in scale_metrics.values():
scale_metrics_list.append(value)
return scale_metrics, scale_metrics_list
def scale_consonance (scale, function, rounding = 4):
'''
Function that gives the average consonance of each scale interval
scale: List (float)
scale to reduce
function: function
function used to compute the consonance between pairs of ratios
Choose between: consonance, dyad_similarity, metric_denom
'''
metric_values = []
mode_values = []
for index1 in range(len(scale)):
metric_value = []
for index2 in range(len(scale)):
entry = scale[index1]/scale[index2]
mode_values.append([scale[index1], scale[index2]])
metric_value.append(function(entry))
metric_values.append(np.average(metric_value))
return metric_values
'''
################################################ SCALE CONSTRUCTION ##############################################################
'''
def oct_subdiv(ratio, octave_limit = 0.01365 ,octave = 2 ,n = 5):
'''
N-TET tuning from Generator Interval
This function uses a generator interval to suggest numbers of steps to divide the octave,
so the given interval will be approximately present (octave_limit) in the steps of the N-TET tuning.
ratio: float
ratio that corresponds to the generator_interval
e.g.: using the fifth (3/2) as generator interval, this function will suggest subdividing the octave into 12, 53, ...
octave_limit: float
Defaults to 0.01365 (Pythagorean comma)
approximation of the octave corresponding to the acceptable distance between the ratio of the generator interval after
multiple iterations and the octave value.
octave: int
Defaults to 2
value of the octave
n: int
Defaults to 5
number of suggested octave subdivisions
Returns
-------
Octdiv: List (int)
list of N-TET tunings corresponding to dividing the octave in equal steps
Octvalue: List (float)
list of the approximations of the octave for each N-TET tuning
'''
Octdiv, Octvalue, i = [], [], 1
ratios = []
while len(Octdiv) < n:
ratio_mult = (ratio**i)
while ratio_mult > octave:
ratio_mult = ratio_mult/octave
rescale_ratio = ratio_mult - round(ratio_mult)
ratios.append(ratio_mult)
i+=1
if -octave_limit < rescale_ratio < octave_limit:
Octdiv.append(i-1)
Octvalue.append(ratio_mult)
else:
continue
return Octdiv, Octvalue
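# Example (sketch): stacking fifths, the deviation from a whole power of two
# first drops below the Pythagorean comma after 12 and then 53 steps, so
# >>> oct_subdiv(3/2, n = 2)
# should suggest the 12-TET and 53-TET subdivisions quoted in the docstring.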
def compare_oct_div(Octdiv = 12, Octdiv2 = 53, bounds = 0.005, octave = 2):
'''
Function that compare steps for two N-TET tunings and return matching ratios and corresponding degrees
Octdiv: int
Defaults to 12.
first N-TET tuning number of steps
Octdiv2: int
Defaults to 53.
second N-TET tuning number of steps
bounds: float
Defaults to 0.005
Maximum distance between 1 ratio of Octdiv and 1 ratio of Octdiv2 to consider a match
octave: int
Defaults to 2
value of the octave
Returns
-------
avg_ratios: List (float)
list of ratios corresponding to the shared steps in the two N-TET tunings
shared_steps: List of tuples
the two elements of each tuple corresponds to the scale steps sharing the same interval in the two N-TET tunings
'''
ListOctdiv = []
ListOctdiv2 = []
OctdivSum = 1
OctdivSum2 = 1
i = 1
i2 = 1
while OctdivSum < octave:
OctdivSum =(nth_root(octave, Octdiv))**i
i+=1
ListOctdiv.append(OctdivSum)
while OctdivSum2 < octave:
OctdivSum2 =(nth_root(octave, Octdiv2))**i2
i2+=1
ListOctdiv2.append(OctdivSum2)
shared_steps = []
avg_ratios = []
for i, n in enumerate(ListOctdiv):
for j, harm in enumerate(ListOctdiv2):
if harm-bounds < n < harm+bounds:
shared_steps.append((i+1, j+1))
avg_ratios.append((n+harm)/2)
return avg_ratios, shared_steps
#Output1: octave subdivisions
#Output2: ratios that led to Output1
def multi_oct_subdiv (peaks, max_sub = 100, octave_limit = 0.01365, octave = 2, n_scales = 10, cons_limit = 0.1):
'''
This function uses the most consonant peaks ratios as input of oct_subdiv function. Each consonant ratio
leads to a list of possible octave subdivisions. These lists are compared and optimal octave subdivisions are
determined.
peaks: List (float)
Peaks represent local maximum in a spectrum
max_sub: int
Defaults to 100.
Maximum number of intervals in N-TET tuning suggestions.
octave_limit: float
Defaults to 0.01365 (Pythagorean comma), matching the deviation form used by oct_subdiv.
Approximation of the octave corresponding to the acceptable distance between the ratio of the generator interval after
multiple iterations and the octave value.
octave: int
Defaults to 2.
value of the octave
n_scales: int
Defaults to 10.
Number of N-TET tunings to compute for each generator interval (ratio).
Returns
-------
multi_oct_div: List (int)
List of octave subdivisions that fit with multiple generator intervals.
ratios: List (float)
list of the generator intervals for which at least 1 N-TET tuning match with another generator interval.
'''
import itertools
from collections import Counter
#a, b, pairs, cons = consonance_peaks(peaks, cons_limit)
ratios, cons = consonant_ratios(peaks, cons_limit)
list_oct_div = []
for i in range(len(ratios)):
list_temp, _ = oct_subdiv(ratios[i], octave_limit, octave, n_scales)
list_oct_div.append(list_temp)
counts = Counter(list(itertools.chain(*list_oct_div)))
oct_div_temp = []
for k, v in counts.items():
if v > 1:
oct_div_temp.append(k)
oct_div_temp = np.sort(oct_div_temp)
multi_oct_div = []
for i in range(len(oct_div_temp)):
if oct_div_temp[i] < max_sub:
multi_oct_div.append(oct_div_temp[i])
return multi_oct_div, ratios
def harmonic_tuning (list_harmonics, octave = 2, min_ratio = 1, max_ratio = 2):
'''
Function that computes a tuning based on a list of harmonic positions
list_harmonics: List (int)
harmonic positions to use in the scale construction
octave: int
min_ratio: float
max_ratio: float
'''
ratios = []
for i in list_harmonics:
ratios.append(rebound(1*i, min_ratio, max_ratio, octave))
ratios = list(set(ratios))
ratios = list(np.sort(np.array(ratios)))
return ratios
def euler_fokker_scale(intervals, n = 1):
'''
Function that takes as input a series of intervals and derives a Euler Fokker Genera scale
intervals: List (float)
n: int
Defaults to 1
number of times the interval is used in the scale generation
'''
multiplicities = [n for x in intervals]
scale = create_euler_fokker_scale(intervals, multiplicities)
return scale
def generator_interval_tuning (interval = 3/2, steps = 12, octave = 2):
'''
Function that takes a generator interval and derives a tuning based on its stacking.
interval: float
Generator interval
steps: int
Defaults to 12 (12-TET for interval 3/2)
Number of steps in the scale
octave: int
Defaults to 2
Value of the octave
'''
scale = []
for s in range(steps):
s += 1
degree = interval**s
while degree > octave:
degree = degree/octave
scale.append(degree)
return sorted(scale)
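# Example (sketch): with the default fifth and 12 steps this yields the
# 12-tone Pythagorean scale of octave-reduced powers (3/2)**s; note the 12th
# degree lands on ~1.0136 (the Pythagorean comma) rather than exactly 1.
# >>> scale = generator_interval_tuning(3/2, 12)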
#Function that takes two ratios as input (the boundaries of a generator-interval range).
#The mediant corresponds to the interval where small and large steps are equal.
def tuning_range_to_MOS (frac1, frac2, octave = 2, max_denom_in = 100, max_denom_out = 100):
gen1 = octave**(frac1)
gen2 = octave**(frac2)
a = Fraction(frac1).limit_denominator(max_denom_in).numerator
b = Fraction(frac1).limit_denominator(max_denom_in).denominator
c = Fraction(frac2).limit_denominator(max_denom_in).numerator
d = Fraction(frac2).limit_denominator(max_denom_in).denominator
print(a, b, c, d)
mediant = (a+c)/(b+d)
mediant_frac = sp.Rational((a+c)/(b+d)).limit_denominator(max_denom_out)
gen_interval = octave**(mediant)
gen_interval_frac = sp.Rational(octave**(mediant)).limit_denominator(max_denom_out)
MOS_signature = [d, b]
invert_MOS_signature = [b, d]
return mediant, mediant_frac, gen_interval, gen_interval_frac, MOS_signature, invert_MOS_signature
#def tuning_embedding ()
def stern_brocot_to_generator_interval (ratio, octave = 2):
gen_interval = octave**(ratio)
return gen_interval
def gen_interval_to_stern_brocot (gen):
root_ratio = log2(gen)
return root_ratio
#Dissonance
def dissmeasure(fvec, amp, model='min'):
"""
Given a list of partials in fvec, with amplitudes in amp, this routine
calculates the dissonance by summing the roughness of every sine pair
based on a model of Plomp-Levelt's roughness curve.
The older model (model='product') was based on the product of the two
amplitudes, but the newer model (model='min') is based on the minimum
of the two amplitudes, since this matches the beat frequency amplitude.
"""
# Sort by frequency
sort_idx = np.argsort(fvec)
am_sorted = np.asarray(amp)[sort_idx]
fr_sorted = np.asarray(fvec)[sort_idx]
# Used to stretch dissonance curve for different freqs:
Dstar = 0.24 # Point of maximum dissonance
S1 = 0.0207
S2 = 18.96
C1 = 5
C2 = -5
# Plomp-Levelt roughness curve:
A1 = -3.51
A2 = -5.75
# Generate all combinations of frequency components
idx = np.transpose(np.triu_indices(len(fr_sorted), 1))
fr_pairs = fr_sorted[idx]
am_pairs = am_sorted[idx]
Fmin = fr_pairs[:, 0]
S = Dstar / (S1 * Fmin + S2)
Fdif = fr_pairs[:, 1] - fr_pairs[:, 0]
if model == 'min':
a = np.amin(am_pairs, axis=1)
elif model == 'product':
a = np.prod(am_pairs, axis=1) # Older model
else:
raise ValueError('model should be "min" or "product"')
SFdif = S * Fdif
D = np.sum(a * (C1 * np.exp(A1 * SFdif) + C2 * np.exp(A2 * SFdif)))
return D
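# Example (sketch): roughness of a dyad of pure tones. Each pair contributes
# min(a1, a2) * (C1*exp(A1*S*df) + C2*exp(A2*S*df)) with S = Dstar/(S1*fmin + S2),
# so a semitone-like pair of unit-amplitude partials is rough:
# >>> dissmeasure([440.0, 466.0], [1.0, 1.0])   # roughly 0.9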
#Input: peaks and amplitudes
def diss_curve (freqs, amps, denom=1000, max_ratio=2, euler_comp = True, method = 'min', plot = True, n_tet_grid = None):
'''
This function computes the dissonance curve and related metrics for a given set of frequencies (freqs) and amplitudes (amps)
freqs: List (float)
list of frequencies associated with spectral peaks
amps: List (float)
list of amplitudes associated with freqs (must be same length)
denom: int
Defaults to 1000.
Highest value for the denominator of each interval
max_ratio: int
Defaults to 2.
Value of the maximum ratio
Set to 2 for a span of 1 octave
Set to 4 for a span of 2 octaves
Set to 8 for a span of 3 octaves
Set to 2**n for a span of n octaves
euler_comp: Boolean
Defaults to True
When set to True, compute the Euler Gradus Suavitatis for the derived scale
method: str
Defaults to 'min'
Can be set to 'min' or 'product'. Refer to dissmeasure function for more information.
plot: boolean
Defaults to True
When set to True, a plot of the dissonance curve will be generated
n_tet_grid: int
Defaults to None
When an integer is given, dotted lines will be added to the plot at steps of the given N-TET scale
Returns
-------
intervals: List of tuples
Each tuple corresponds to the numerator and the denominator of each scale step ratio
ratios: List (float)
list of ratios that constitute the scale
euler_score: int
value of consonance of the scale
diss: float
value of averaged dissonance of the total curve
dyad_sims: List (float)
list of dyad similarities for each ratio of the scale
'''
from numpy import array, linspace, empty, concatenate
from scipy.signal import argrelextrema
from fractions import Fraction
freqs = np.array(freqs)
r_low = 1
alpharange = max_ratio
method = method
n = 1000
diss = empty(n)
a = concatenate((amps, amps))
for i, alpha in enumerate(linspace(r_low, alpharange, n)):
f = concatenate((freqs, alpha*freqs))
d = dissmeasure(f, a, method)
diss[i] = d
diss_minima = argrelextrema(diss, np.less)
intervals = []
for d in range(len(diss_minima[0])):
frac = Fraction(diss_minima[0][d]/(n/(max_ratio-1))+1).limit_denominator(denom)
frac = (frac.numerator, frac.denominator)
intervals.append(frac)
intervals.append((2, 1))
ratios = [i[0]/i[1] for i in intervals]
ratios_sim = [np.round(r, 2) for r in ratios] #round ratios for similarity measures of harmonic series
#print(ratios_sim)
dyad_sims = ratios2harmsim(ratios[:-1]) # compute dyads similarities with natural harmonic series
a = 1
ratios_euler = [a]+ratios
ratios_euler = [int(round(num, 2)*1000) for num in ratios_euler]
#print(ratios_euler)
euler_score = None
if euler_comp == True:
euler_score = euler(*ratios_euler)
euler_score = euler_score/len(diss_minima[0])
else:
euler_score = 'NaN'
if plot == True:
plt.figure(figsize=(14, 6))
plt.plot(linspace(r_low, alpharange, len(diss)), diss)
plt.xscale('linear')
plt.xlim(r_low, alpharange)
try:
plt.text(1.9, 1.5, 'Euler = '+str(int(euler_score)), horizontalalignment = 'center',
verticalalignment='center', fontsize = 16)
except:
pass
for n, d in intervals:
plt.axvline(n/d, color='silver')
# Plot N-TET grid
if n_tet_grid != None:
n_tet = NTET_ratios(n_tet_grid, max_ratio = max_ratio)
for n in n_tet :
plt.axvline(n, color='red', linestyle = '--')
# Plot scale ticks
plt.minorticks_off()
plt.xticks([n/d for n, d in intervals],
['{}/{}'.format(n, d) for n, d in intervals], fontsize = 13)
plt.yticks(fontsize = 13)
plt.tight_layout()
plt.show()
return intervals, ratios, euler_score, np.average(diss), dyad_sims
'''Harmonic Entropy'''
def compute_harmonic_entropy_domain_integral(ratios, ratio_interval, spread=0.01, min_tol=1e-15):
# The first step is to pre-sort the ratios to speed up computation
ind = np.argsort(ratios)
weight_ratios = ratios[ind]
centers = (weight_ratios[:-1] + weight_ratios[1:]) / 2
ratio_interval = array(ratio_interval)
N = len(ratio_interval)
HE = zeros(N)
for i, x in enumerate(ratio_interval):
P = diff(concatenate(([0], norm.cdf(log2(centers), loc=log2(x), scale=spread), [1])))
ind = P > min_tol
HE[i] = -np.sum(P[ind] * log2(P[ind]))
return weight_ratios, HE
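# In this formulation the harmonic entropy at a candidate ratio x is
# HE(x) = -sum_i P_i * log2(P_i), where P_i is the probability mass that a
# Gaussian in log2-frequency (centred on x, width `spread`) assigns to the
# cell of ratio i, cells being delimited by midpoints between sorted ratios.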
def compute_harmonic_entropy_simple_weights(numerators, denominators, ratio_interval, spread=0.01, min_tol=1e-15):
# The first step is to pre-sort the ratios to speed up computation
ratios = numerators / denominators
ind = np.argsort(ratios)
numerators = numerators[ind]
denominators = denominators[ind]
weight_ratios = ratios[ind]
ratio_interval = array(ratio_interval)
N = len(ratio_interval)
HE = zeros(N)
for i, x in enumerate(ratio_interval):
P = norm.pdf(log2(weight_ratios), loc=log2(x), scale=spread) / sqrt(numerators * denominators)
ind = P > min_tol
P = P[ind]
P /= np.sum(P)
HE[i] = -np.sum(P * log2(P))
return weight_ratios, HE
def harmonic_entropy (ratios, res = 0.001, spread = 0.01, plot_entropy = True, plot_tenney = False, octave = 2):
'''
Harmonic entropy is a measure of the uncertainty in pitch perception, and it provides a physical correlate of tonalness,
one aspect of the psychoacoustic concept of dissonance (Sethares). High tonalness corresponds to low entropy and low tonalness
corresponds to high entropy.
ratios: List (float)
ratios between each pairs of frequency peaks
res: float
Defaults to 0.001
resolution of the ratio steps
spread: float
Default to 0.01
plot_entropy: boolean
Defaults to True
When set to True, plot the harmonic entropy curve
plot_tenney: boolean
Defaults to False
When set to True, plot the tenney heights (y-axis) across ratios (x-axis)
octave: int
Defaults to 2
Value of the maximum interval ratio
Returns
----------
HE_minima: List (float)
List of ratios corresponding to minima of the harmonic entropy curve
HE: float
Value of the averaged harmonic entropy
'''
fracs, numerators, denominators = scale2frac(ratios)
ratios = numerators / denominators
#print(ratios)
#ratios = np.interp(ratios, (ratios.min(), ratios.max()), (1, 10))
bendetti_heights = numerators * denominators
tenney_heights = log2(bendetti_heights)
ind = np.argsort(tenney_heights) # first, sort by Tenney height to make things more efficient
bendetti_heights = bendetti_heights[ind]
tenney_heights = tenney_heights[ind]
numerators = numerators[ind]
denominators = denominators[ind]
#ratios = ratios[ind]
if plot_tenney == True:
fig = plt.figure(figsize=(10, 4), dpi=150)
ax = fig.add_subplot(111)
# ax.scatter(ratios, 2**tenney_heights, s=1)
ax.scatter(ratios, tenney_heights, s=1, alpha=.2)
# ax.scatter(ratios[:200], tenney_heights[:200], s=1, color='r')
plt.show()
# Next, we need to ensure a distance `d` between adjacent ratios
M = len(bendetti_heights)
delta = 0.00001
indices = ones(M, dtype=bool)
for i in range(M - 2):
ind = abs(ratios[i + 1:] - ratios[i]) > delta
indices[i + 1:] = indices[i + 1:] * ind
bendetti_heights = bendetti_heights[indices]
tenney_heights = tenney_heights[indices]
numerators = numerators[indices]
denominators = denominators[indices]
ratios = ratios[indices]
M = len(tenney_heights)
#print(M)
#print('hello')
x_ratios = arange(1, octave, res)
_, HE = compute_harmonic_entropy_domain_integral(ratios, x_ratios, spread=spread)
#_, HE = compute_harmonic_entropy_simple_weights(numerators, denominators, x_ratios, spread=0.01)
ind = argrelextrema(HE, np.less)
HE_minima = (x_ratios[ind], HE[ind])
if plot_entropy == True:
fig = plt.figure(figsize=(10, 4), dpi=150)
ax = fig.add_subplot(111)
# ax.plot(weight_ratios, log2(pdf))
ax.plot(x_ratios, HE)
# ax.plot(x_ratios, HE_simple)
ax.scatter(HE_minima[0], HE_minima[1], color='k', s=4)
ax.set_xlim(1, octave)
plt.show()
return HE_minima, np.average(HE)
'''Scale reduction'''
def scale_reduction (scale, mode_n_steps, function, rounding = 4):
'''
Function that reduces the number of steps in a scale according to the consonance between pairs of ratios
scale: List (float)
scale to reduce
mode_n_steps: int
number of steps of the reduced scale
function: function
function used to compute the consonance between pairs of ratios
Choose between: consonance, dyad_similarity, metric_denom
'''
metric_values = []
mode_values = []
for index1 in range(len(scale)):
for index2 in range(len(scale)):
if scale[index1] > scale[index2]: #do not include the diagonal in the computation of the avg. consonance
entry = scale[index1]/scale[index2]
#print(entry_value, scale[index1], scale[index2])
mode_values.append([scale[index1], scale[index2]])
#if function == metric_denom:
# metric_values.append(int(function(sp.Rational(entry).limit_denominator(1000))))
#else:
metric_values.append(function(entry))
if function == metric_denom:
cons_ratios = [x for _, x in sorted(zip(metric_values, mode_values))]
else:
cons_ratios = [x for _, x in sorted(zip(metric_values, mode_values))][::-1]
i = 0
mode_ = []
mode_out = []
while len(mode_out) < mode_n_steps:
cons_temp = cons_ratios[i]
mode_.append(cons_temp)
mode_out_temp = [item for sublist in mode_ for item in sublist]
mode_out_temp = [np.round(x, rounding) for x in mode_out_temp]
mode_out = sorted(set(mode_out_temp), key = mode_out_temp.index)[0:mode_n_steps]
i +=1
mode_metric = []
for index1 in range(len(mode_out)):
for index2 in range(len(mode_out)):
if mode_out[index1] > mode_out[index2]:
entry = mode_out[index1]/mode_out[index2]
#if function == metric_denom:
# mode_metric.append(int(function(sp.Rational(entry).limit_denominator(1000))))
#else:
mode_metric.append(function(entry))
return np.average(metric_values), mode_out, np.average(mode_metric)
'''------------------------------------------------------Peaks extraction--------------------------------------------------------------'''
import emd
from PyEMD import EMD, EEMD
from scipy.signal import butter, lfilter
import colorednoise as cn
#PEAKS FUNCTIONS
#HH1D_weightAVG (Hilbert-Huang 1D): takes the average of all the instantaneous frequencies weighted by power
#HH1D_max: takes the frequency bin that has the maximum power value
def compute_peaks_ts (data, peaks_function = 'EMD', FREQ_BANDS = None, precision = 0.25, sf = 1000, max_freq = 80):
alphaband = [[7, 12]]
if FREQ_BANDS is None:
FREQ_BANDS = [[2, 3.55], [3.55, 7.15], [7.15, 14.3], [14.3, 28.55], [28.55, 49.4]]
if peaks_function == 'EEMD':
IMFs = EMD_eeg(data)[1:6]
if peaks_function == 'EMD':
data = np.interp(data, (data.min(), data.max()), (0, +1))
IMFs = emd.sift.sift(data)
#IMFs = emd.sift.ensemble_sift(data)
IMFs = np.moveaxis(IMFs, 0, 1)[1:6]
try:
peaks_temp = []
amps_temp = []
for imf in range(len(IMFs)):
p, a = compute_peak(IMFs[imf], precision = precision, average = 'median')
#print(p)
peaks_temp.append(p)
amps_temp.append(a)
peaks_temp = np.flip(peaks_temp)
amps_temp = np.flip(amps_temp)
except:
pass
if peaks_function == 'HH1D_max':
IMFs = EMD_eeg(data)
IMFs = np.moveaxis(IMFs, 0, 1)
IP, IF, IA = emd.spectra.frequency_transform(IMFs[:, 1:6], sf, 'nht')
precision_hh = precision*2
low = 1
high = max_freq
steps = int((high-low)/precision_hh)
edges, bins = emd.spectra.define_hist_bins(low, high, steps, 'log')
# Compute the 1d Hilbert-Huang transform (power over carrier frequency)
spec = emd.spectra.hilberthuang_1d(IF, IA, edges)
spec = np.moveaxis(spec, 0, 1)
peaks_temp = []
amps_temp = []
for e, i in enumerate(spec):
max_power = np.argmax(i)
peaks_temp.append(bins[max_power])
amps_temp.append(spec[e][max_power])
peaks_temp = np.flip(peaks_temp)
amps_temp = np.flip(amps_temp)
#if peaks_function == 'HH1D_weightAVG':
if peaks_function == 'adapt':
p, a = compute_peaks_raw(data, alphaband, precision = precision, average = 'median')
FREQ_BANDS = alpha2bands(p)
peaks_temp, amps_temp = compute_peaks_raw(data, FREQ_BANDS, precision = precision, average = 'median')
if peaks_function == 'fixed':
peaks_temp, amps_temp = compute_peaks_raw(data, FREQ_BANDS, precision = precision, average = 'median')
peaks = np.array(peaks_temp)
amps = np.array(amps_temp)
return peaks, amps
def extract_all_peaks (data, sf, precision, max_freq = None):
if max_freq == None:
max_freq = sf/2
mult = 1/precision
nperseg = sf*mult
nfft = nperseg
freqs, psd = scipy.signal.welch(data, sf, nfft = nfft, nperseg = nperseg, average = 'median')
psd = 10. * np.log10(psd)
indexes = ss.find_peaks(psd, height=None, threshold=None, distance=10, prominence=None, width=2, wlen=None, rel_height=0.5, plateau_size=None)
peaks = []
amps = []
for i in indexes[0]:
peaks.append(freqs[i])
amps.append(psd[i])
peaks = np.around(np.array(peaks), 5)
peaks = list(peaks)
peaks = [p for p in peaks if p<=max_freq]
return peaks, amps
def harmonic_peaks_fit (peaks, amps, min_freq = 0.5, max_freq = 30, min_harms = 2, harm_limit = 128):
n_total = []
harm_ = []
harm_peaks = []
max_n = []
max_peaks = []
max_amps = []
harmonics = []
harmonic_peaks = []
harm_peaks_fit = []
for p, a in zip(peaks, amps):
n = 0
harm_temp = []
harm_peaks_temp = []
if p < max_freq and p > min_freq:
for p2 in peaks:
if p2 == p:
ratio = 0.1 #arbitrary value to set ratio value to non integer
if p2 > p:
ratio = p2/p
harm = ratio
if p2 < p:
ratio = p/p2
harm = -ratio
if ratio.is_integer():
if harm <= harm_limit:
n += 1
harm_temp.append(harm)
if p not in harm_peaks_temp:
harm_peaks_temp.append(p)
if p2 not in harm_peaks_temp:
harm_peaks_temp.append(p2)
n_total.append(n)
harm_.append(harm_temp)
harm_peaks.append(harm_peaks_temp)
if n >= min_harms:
max_n.append(n)
max_peaks.append(p)
max_amps.append(a)
#print(harm_temp)
harmonics.append(harm_temp)
harmonic_peaks.append(harm_peaks)
harm_peaks_fit.append([p, harm_temp, harm_peaks_temp])
for i in range(len(harm_peaks_fit)):
harm_peaks_fit[i][2] = sorted(harm_peaks_fit[i][2])
max_n = np.array(max_n)
#!/usr/bin/ python3
print('''\x1b[32m
██████╗ █████╗ ███╗ ███╗ █████╗ ███╗ ██╗███████╗████████╗
██╔══██╗██╔══██╗████╗ ████║██╔══██╗████╗ ██║██╔════╝╚══██╔══╝
██████╔╝███████║██╔████╔██║███████║██╔██╗ ██║█████╗ ██║
██╔══██╗██╔══██║██║╚██╔╝██║██╔══██║██║╚██╗██║██╔══╝ ██║
██║ ██║██║ ██║██║ ╚═╝ ██║██║ ██║██║ ╚████║███████╗ ██║
╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═══╝╚══════╝ ╚═╝\x1b[35m
╔╦╗┌─┐ ┌┐┌┌─┐┬ ┬┌─┐ ╔═╗┬─┐┌─┐┌┬┐┌─┐┬┌┐┌ ╔╦╗┌─┐┌─┐┬┌─┐┌┐┌
║║├┤ ││││ │└┐┌┘│ │ ╠═╝├┬┘│ │ │ ├┤ ││││ ║║├┤ └─┐││ ┬│││
═╩╝└─┘ ┘└┘└─┘ └┘ └─┘ ╩ ┴└─└─┘ ┴ └─┘┴┘└┘ ═╩╝└─┘└─┘┴└─┘┘└┘
\u001b[31mAuthors: \x1b[33m<NAME> and <NAME>
\u001b[31mDate: \x1b[33m31-May-2017
\u001b[31mCorrespondence: \x1b[33m<EMAIL>
\u001b[31mURL: \x1b[33mhttps://sarisabban.github.io/RamaNet
\x1b[36m---------------------------------------------------------\x1b[0m''')
import os
import re
import sys
import h5py
import time
import glob
import math
import tqdm
import gzip
import keras
import random
import sklearn
import Bio.PDB
import datetime
import warnings
import argparse
import numpy as np
import pandas as pd
import tensorflow as tf
from pyrosetta import *
from pyrosetta.toolbox import *
from keras.optimizers import Adam
from keras.models import Sequential, Model
from keras.losses import BinaryCrossentropy
from keras.layers.convolutional import Conv2D
from keras.layers import Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers import Input, Dense, Reshape, Flatten
from keras.layers import UpSampling2D, BatchNormalization
from keras.layers import Dropout, GlobalMaxPooling2D, Conv2DTranspose
# Silence Tensorflow, Keras, and initialise PyRosetta
def warn(*args, **kwargs): pass
warnings.warn = warn
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
init('-out:level 0')
print('\x1b[36m--------------------------------------------------------\x1b[0m')
# Setup arguments
parser = argparse.ArgumentParser(description='De Novo Protein Design Neural Network')
parser.add_argument('-d', '--dataset', nargs='+', metavar='', help='Build the Backbone or Sequence datasets')
parser.add_argument('-f', '--frag', action='store_true', help='Build the Fragment dataset')
parser.add_argument('-tb', '--TrainBack', action='store_true', help='Train the Backbone neural network')
parser.add_argument('-tf', '--TrainFrag', action='store_true', help='Train the Fragment neural network')
parser.add_argument('-ts', '--TrainSeq', action='store_true', help='Train the Sequence neural network')
args = parser.parse_args()
class Dataset():
''' Build a machine learning dataset of protein structures '''
def Database(self, TempDIR, FinalDIR):
'''
Downloads the entire PDB database from https://www.wwpdb.org/
moves all files into one directory, then uncompresses all the files
Generates a directory which contains all .PDB structure files
'''
print('\x1b[33m[.] Downloading PDB database...\x1b[0m')
web = 'rsync.wwpdb.org::ftp/data/structures/divided/pdb/'
os.system('rsync -rlpt -q -v -z --delete --port=33444 {} {}'
.format(web, TempDIR))
print('\x1b[32m[+] Download complete\x1b[0m')
os.mkdir(FinalDIR)
filelist = os.listdir(TempDIR)
print('\x1b[33m[.] Moving files...\x1b[0m')
for directories in tqdm.tqdm(filelist):
files = os.listdir('{}/{}'.format(TempDIR, directories))
for afile in files:
location = ('{}/{}/{}'.format(TempDIR, directories, afile))
os.rename(location, '{}/{}'.format(FinalDIR, afile))
os.system('rm -r ./{}'.format(TempDIR))
print('\x1b[32m[+] Moving complete\x1b[0m')
def Extract(self, directory):
'''
Extracts all the .ent.gz files and separate all chains and save them
into seperate .pdb files. Replaces each .ent.gz file with the .pdb
file of each chain
'''
print('\x1b[33m[.] Extracting files...\x1b[0m')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
io = Bio.PDB.PDBIO()
os.chdir(directory)
for TheFile in tqdm.tqdm(pdbfilelist):
try:
TheName = TheFile.split('.')[0].split('pdb')[1].upper()
InFile = gzip.open(TheFile, 'rt')
structure = Bio.PDB.PDBParser(QUIET=True)\
.get_structure(TheName, InFile)
count = 0
for chain in structure.get_chains():
io.set_structure(chain)
io.save(structure.get_id()+'_'+chain.get_id()+'.pdb')
os.remove(TheFile)
except Exception as TheError:
print('\x1b[31m[-] Failed to extract\t{}\x1b[33m: {}\x1b[0m'
.format(TheFile.upper(), str(TheError)))
os.remove(TheFile)
os.chdir(current)
def NonProtein(self, directory):
''' Remove non-protein structures '''
print('\x1b[33m[.] Deleting non-protein structures...\x1b[0m')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
os.chdir(directory)
for TheFile in tqdm.tqdm(pdbfilelist):
try:
structure = Bio.PDB.PDBParser(QUIET=True)\
.get_structure('X', TheFile)
ppb = Bio.PDB.Polypeptide.PPBuilder()
Type = ppb.build_peptides(structure, aa_only=True)
if Type == []: os.remove(TheFile)
else: continue
except: os.remove(TheFile)
os.chdir(current)
def Size(self, directory, Size_From, Size_To):
''' Remove structures not within defined size '''
print('\x1b[33m[.] Removing unwanted structure sizes...\x1b[0m')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
os.chdir(directory)
for TheFile in tqdm.tqdm(pdbfilelist):
try:
parser = Bio.PDB.PDBParser()
structure = parser.get_structure('X', TheFile)
model = structure[0]
dssp = Bio.PDB.DSSP(model, TheFile, acc_array='Wilke')
for aa in dssp: length = aa[0]
if length >= int(Size_To) or length <= int(Size_From):
os.remove(TheFile)
except: print('\x1b[31m[-] Error in finding protein size\x1b[0m')
os.chdir(current)
def Break(self, directory):
''' Remove structures with a broken (non-continuous) chains '''
print('\x1b[33m[.] Removing non-continuous structures...\x1b[0m')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
os.chdir(directory)
for TheFile in tqdm.tqdm(pdbfilelist):
structure = Bio.PDB.PDBParser(QUIET=True)\
.get_structure('X', TheFile)
ppb = Bio.PDB.Polypeptide.PPBuilder()
Type = ppb.build_peptides(structure, aa_only=True)
try:
x = Type[1]
os.remove(TheFile)
except: continue
os.chdir(current)
def Loops(self, directory, LoopLength):
'''
Remove structures that have loops that are larger than a
specific length
'''
print('\x1b[33m[.] Removing structures with long loops...\x1b[0m')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
os.chdir(directory)
for TheFile in tqdm.tqdm(pdbfilelist):
try:
parser = Bio.PDB.PDBParser()
structure = parser.get_structure('X', TheFile)
model = structure[0]
dssp = Bio.PDB.DSSP(model, TheFile, acc_array='Wilke')
SS = list()
for res in dssp:
ss = res[2]
if ss == '-' or ss == 'T' or ss == 'S': SS.append('L')
else: SS.append('.')
loops = ''.join(SS).split('.')
loops = [item for item in loops if item]
LargeLoop = None
for item in loops:
if len(item) <= LoopLength: continue
else: LargeLoop = 'LargeLoop'
if LargeLoop == 'LargeLoop': os.remove(TheFile)
else: continue
except: os.remove(TheFile)
os.chdir(current)
def Renumber(self, directory):
''' Renumber structures starting at 1 '''
print('\x1b[33m[.] Renumbering structures...\x1b[0m')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
os.chdir(directory)
for TheFile in tqdm.tqdm(pdbfilelist):
pdb = open(TheFile, 'r')
PDB = open(TheFile+'X', 'w')
count = 0
num = 0
AA2 = None
for line in pdb:
count += 1
AA1 = line[23:27]
if not AA1 == AA2: num += 1
final_line = line[:7]+'{:4d}'.format(count)+line[11:17]+\
line[17:21]+'A'+'{:4d}'.format(num)+line[26:]
AA2 = AA1
PDB.write(final_line)
PDB.close()
os.remove(TheFile)
os.rename(TheFile+'X', TheFile)
os.chdir(current)
def Rg(self, directory, RGcutoff):
''' Remove structures with a radius of gyration below the cutoff value '''
print('\x1b[33m[.] Removing structure low Rg values...\x1b[0m')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
os.chdir(directory)
for TheFile in tqdm.tqdm(pdbfilelist):
mass = list()
Structure = open(TheFile, 'r')
for line in Structure:
line = line.split()
if line[0] == 'TER' or line[0] == 'END': continue
else:
if line[-1] == 'C': mass.append(12.0107)
elif line[-1] == 'O': mass.append(15.9994)
elif line[-1] == 'N': mass.append(14.0067)
elif line[-1] == 'S': mass.append(32.0650)
elif line[-1] == 'H': mass.append(1.00794)
else: continue
coord = list()
p = Bio.PDB.PDBParser()
structure = p.get_structure('X', TheFile)
for model in structure:
for chain in model:
for residue in chain:
for atom in residue: coord.append(atom.get_coord())
xm = [(m*i, m*j, m*k) for (i, j, k), m in zip(coord, mass)]
tmass = sum(mass)
rr = sum(mi*i + mj*j + mk*k for (i, j, k), (mi, mj, mk)\
in zip(coord, xm))
mm = sum((sum(i)/tmass)**2 for i in zip(*xm))
rg = math.sqrt(rr/tmass-mm)
if rg <= RGcutoff: os.remove(TheFile)
else: continue
os.chdir(current)
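# The expression above is the standard mass-weighted radius of gyration,
# Rg = sqrt( sum_i m_i*|r_i|^2 / M - |sum_i m_i*r_i / M|^2 ),
# with rr/tmass the second moment and mm the squared centre-of-mass norm.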
def Clean(self, directory):
''' Clean each structure within a directory '''
print('\x1b[33m[.] Cleaning structures...\x1b[0m')
os.mkdir('PDBCleaned')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
os.chdir(directory)
for TheFile in tqdm.tqdm(pdbfilelist):
CurFile = open(TheFile, 'r')
NewFile = open('Clean-{}'.format(TheFile), 'a')
for line in CurFile:
if line.split()[0] == 'ATOM': NewFile.write(line)
CurFile.close()
NewFile.close()
os.system('mv Clean-{} ../PDBCleaned'.format(TheFile))
os.chdir(current)
def Path(self, directory, path):
''' Generate a file with the path to each file '''
print('\x1b[33m[.] Generating paths...\x1b[0m')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
os.chdir(directory)
PathFile = open('PDB.list', 'a')
for TheFile in tqdm.tqdm(pdbfilelist):
line = '{}/PDBCleaned/{}\n'.format(path, TheFile)
PathFile.write(line)
os.system('mv PDB.list ../')
os.chdir(current)
def RelaxHPC(self, path, cores):
'''
Generate a PBS job scheduler to perform each structure
relax on a HPC
'''
HPCfile = open('relax.pbs', 'w')
HPCfile.write('#!/bin/bash\n')
HPCfile.write('#PBS -N Relax\n')
HPCfile.write('#PBS -q fat\n')
HPCfile.write('#PBS -l select=1:ncpus=1\n')
HPCfile.write('#PBS -j oe\n')
HPCfile.write('#PBS -J 1-{}\n'.format(str(cores)))
HPCfile.write('cd $PBS_O_WORKDIR\n')
HPCfile.write('mkdir PDBRelaxed\n')
HPCfile.write('cd PDBRelaxed\n')
HPCfile.write('''thefile=$(awk -v "line=${PBS_ARRAY_INDEX}"''')
HPCfile.write(''''NR == line { print; exit }' ../PDB.list)\n''')
HPCfile.write('{}/main/source/bin/'.format(path))
HPCfile.write('relax.default.linuxgccrelease ')
HPCfile.write('-relax:thorough -nstruct 100 -database ')
HPCfile.write('{}/main/database -s $thefile'.format(path))
print('\x1b[32m[+] Generated HPC job submission file\x1b[0m')
def Relax(self, directory):
''' Relax each structure in a directory on a local computer '''
print('\x1b[33m[.] Relaxing structures...\x1b[0m')
os.mkdir('PDBRelaxed')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
os.chdir(directory)
for TheFile in tqdm.tqdm(pdbfilelist):
for i in range(1, 101):
scorefxn = get_fa_scorefxn()
relax = pyrosetta.rosetta.protocols.relax.FastRelax()
relax.set_scorefxn(scorefxn)
pose = pose_from_pdb(TheFile)
relax.apply(pose)
pose.dump_pdb('Relaxed{}-{}'.format(i, TheFile))
os.system('mv Relaxed{}-{} ../PDBRelaxed'.format(i, TheFile))
os.chdir(current)
def C_Max(self, filename):
''' Find the maximum value of the Distance Map in a dataset '''
max_in_line = []
with open(filename, 'r') as f:
next(f)
for line in f:
line = line.strip().split(',')[1:]
line = [float(item) for item in line]
max_in_line.append(max(line))
maximum = max(max_in_line)
print('\x1b[32m[+] Contact Map maximum value: {}\x1b[0m'\
.format(maximum))
return(maximum)
def DatasetPSCM(self, directory):
'''
Compile a dataset of each residue's phi and psi angles and another
dataset of the contact map for each structure. This dataset is padded
with zeros.
'''
a = 'Compiling phi and psi angles dataset '
b = 'as well as a distance matrix dataset'
text = a+b
print('\x1b[32m{}\x1b[0m'.format(text))
# Setup dataset header for angles
headerPS = ['PDB_ID']
for i in range(1, 150+1):
headerPS.append(',phi_{},psi_{}'.format(i, i))
headerPS = ''.join(headerPS)
with open('./PS.csv', 'w') as headPS:
headPS.write(headerPS+'\n')
# Setup dataset header for distance matrices
headerCM = ['PDB_ID']
for r in range(1, 150+1):
for c in range(1, 150+1):
headerCM.append(',{}{}'.format(r, c))
headerCM = ''.join(headerCM)
with open('./CM.csv', 'w') as headCM:
headCM.write(headerCM+'\n')
for File in tqdm.tqdm(os.listdir(directory)):
TheFile = '{}/{}'.format(directory, File)
try:
# Compile angles
pose = pose_from_pdb(TheFile)
phi = []
psi = []
for aa in range(len(pose.residues)):
try:
p = pose.phi(aa+1)
s = pose.psi(aa+1)
if p < 0: p = p+360
if s < 0: s = s+360
phi.append(p)
psi.append(s)
except: pass
angles = []
for P, S in zip(phi, psi):
angles.append(str(round(P, 5))+','+str(round(S, 5)))
assert len(phi) == len(psi)
Angles = ','.join(angles)
if len(angles) >= 150: AngLine = Angles
else:
addition = 150-len(angles)
zeros = []
for adds in range(addition): zeros.append('0.0,0.0')
Zeros = ','.join(zeros)
AngLine = '{},{}'.format(Angles, Zeros)
ThePSLine = '{},{}\n'.format(File, AngLine)
with open('PS.csv', 'a') as PSdata:
PSdata.write(ThePSLine)
#Compile contact map (Ca-Ca contact <= 12 angstroms)
BIO = Bio.PDB.PDBParser(QUIET=True)
structure = BIO.get_structure('X', TheFile)
ppb = Bio.PDB.Polypeptide.PPBuilder()
Type = ppb.build_peptides(structure, aa_only=False)
model = Type
chain = model[0]
CM = []
for aa1 in range(0, 150):
for aa2 in range(0, 150):
try:
residue1 = chain[aa1]
residue2 = chain[aa2]
atom1 = residue1['CA']
atom2 = residue2['CA']
if atom1-atom2 <= 12: CM.append(str(atom1-atom2))
else: CM.append(str(0))
except:
CM.append(str(0))
assert len(CM) == 22500
ContactMap = ','.join(CM)
TheCMLine = '{},{}\n'.format(File, ContactMap)
with open('CM.csv', 'a') as CMdata:
CMdata.write(TheCMLine)
except: pass
def VectorisePSCM(self, PS_file='PS.csv',
CM_file='CM.csv',
C_MAX=12,
fp=np.float64):
'''
This function vectorises the backbone PS and CM datasets, normalises
them, combines them, as well as constructs the final tensor and
exports the result as a serial.
'''
# 1. Import a single row of PS dataset
with open(PS_file) as PSf:
next(PSf)
P, S = [], []
for line in PSf:
# 2. Isolate different angles
line = line.strip().split(',')
p = [float(item) for item in line[1::2]]
s = [float(item) for item in line[2::2]]
assert len(p) == len(s)
P.append(np.array(p, dtype=fp))
S.append(np.array(s, dtype=fp))
with open(CM_file) as CMf:
next(CMf)
CM = []
for line in CMf:
# 3. Isolate different points
line = [float(item) for item in line.strip().split(',')[1:]]
cm = np.reshape(line, (150, 150))
CM.append(np.array(cm, dtype=fp))
# 4. Construct PS matrices
P = np.array(P)
S = np.array(S)
# 5. Normalise PS angles (min/max) [-1, 1]
P /= 180
S /= 180
P -= 1
S -= 1
PS = np.array([P, S])
PS = np.swapaxes(PS, 0, 2)
PS = np.swapaxes(PS, 0, 1)
# 6. Construct CM matrices
CM = np.array(CM)
# 7. Normalise CM contact map (min/max) [-1, 1]
CM /= (C_MAX/2)
CM -= 1
# 8. Construct final dataset matrix
dataset = np.concatenate([PS, CM], axis=2)
# 9. Shuffle dataset
dataset = sklearn.utils.shuffle(dataset)
# 10. Serialise tensors
with h5py.File('PS+CM.h5', 'w') as data:
dataset = data.create_dataset('default', data=dataset)
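# Normalisation check (sketch): phi/psi angles in [0, 360) map through
# x/180 - 1 to [-1, 1), and Ca-Ca distances in [0, C_MAX] map through
# x/(C_MAX/2) - 1 to [-1, 1], so both channels of the tensor share the same
# value range before training.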
def DatasetAsPSaM(self, directory):
'''
Compile a dataset of each residue's amino acid identify, secondary
structure, phi angle, psi angle, solvent accessible surface area as
a .csv file and the contact map as a separate .csv file. to be run
after clean() on the ./cleaned directory, also outputs a file
identifying the sizes of structures, so the largest value can be used
with HeaderAsPSaM()
'''
os.makedirs('./Completed', exist_ok=True)
os.makedirs('./Error_NotEqual', exist_ok=True)
os.makedirs('./Error_Broken', exist_ok=True)
os.makedirs('./Error_Small', exist_ok=True)
for File in tqdm.tqdm(os.listdir(directory)):
try:
TheFile = '{}/{}'.format(directory, File)
pose = pose_from_pdb(TheFile)
DSSP = pyrosetta.rosetta.protocols.moves.DsspMover()
DSSP.apply(pose)
sasa_calc = pyrosetta.rosetta.core.scoring.sasa.SasaCalc()
sasa_calc.calculate(pose)
size = pose.total_residue()
aa = []
ss = []
phi = []
psi = []
sasa = []
info = []
ctmp = []
m = []
surf = list(sasa_calc.get_residue_sasa())
for r in range(size):
if pose.residue(r+1).is_protein():
aa.append(pose.sequence(r+1, r+1))
ss.append(pose.secstruct(r+1))
p = pose.phi(r+1)
if p < 0: p = p+360
phi.append(p)
s = pose.psi(r+1)
if s < 0: s = s+360
psi.append(s)
sasa.append(surf[r])
for r in range(0, size):
for R in range(0, size):
if pose.residue(r+1).is_protein() and\
pose.residue(R+1).is_protein():
CAr = pose.residue(r+1).xyz('CA')
CAR = pose.residue(R+1).xyz('CA')
CAr_CAR_vector = CAR-CAr
Cont = CAr_CAR_vector.norm()
if Cont <= 12: ctmp.append(Cont)
else: ctmp.append(0)
if len(aa) >= 50:
try:
assert len(aa) == len(ss) == len(phi)\
== len(psi) == len(sasa) == math.sqrt(len(ctmp))
for AA,SS,P,S,SASA in zip(aa,ss,phi,psi,sasa):
info.append('{},{},{},{},{}'\
.format(AA, SS, P, S, SASA))
Info = ','.join(info)
with open('./AsPSa_noheader_nofill.csv', 'a') as data:
data.write(File + ',' + Info + '\n')
with open('lengths.txt', 'a') as length:
length.write(str(len(aa))+',')
for x in ctmp:
m.append('{}'.format(x))
M = ','.join(m)
with open('./M_noheader_nofill.csv', 'a') as data:
data.write(File + ',' + M + '\n')
os.system('mv {} ./Completed'.format(TheFile))
except:
os.system('mv {} ./Error_NotEqual'\
.format(TheFile))
else: os.system('mv {} ./Error_Small'.format(TheFile))
except: os.system('mv {} ./Error_Broken'.format(TheFile))
def HeaderAsPSaM(self, choice='AsPSa'):
'''
Constructs a .csv header and completes the dataset. To find the value of
the largest structure run: sort -nk 1 lengths.txt
'''
with open('lengths.txt', 'r') as L:
length = int(max(L.readlines()[0].strip().split(',')))
header = ['PDB_ID']
if choice == 'AsPSa':
for i in range(1, length+1):
header.append(',aa_{},ss_{},phi_{},psi_{},sasa_{}'\
.format(i, i, i, i, i))
header = ''.join(header)
with open('./AsPSa_noheader_nofill.csv', 'r') as data:
with open('./AsPSa_nofill.csv', 'w') as head:
head.write(header+'\n')
for line in data:
head.write(line)
os.remove('./AsPSa_noheader_nofill.csv')
elif choice == 'M':
for r in range(1, length+1):
for c in range(1, length+1):
header.append(',{}{}'.format(r, c))
header = ''.join(header)
with open('./M_noheader_nofill.csv', 'r') as data:
with open('./M_nofill.csv', 'w') as head:
head.write(header+'\n')
for line in data:
head.write(line)
os.remove('./M_noheader_nofill.csv')
def Fill(self, filename):
''' Fills missing .csv table spaces with zeros '''
name = filename.split('_')[0]
with open(filename) as f:
with open(name+'.csv', 'a') as F:
first_line = f.readline()
F.write(first_line)
size = len(first_line.strip().split(','))
for line in f:
line = line.strip().split(',')
gap = size - len(line)
for zero in range(gap):
line.append('0')
new_line = ','.join(line)
F.write(new_line + '\n')
os.remove(filename)
def VectoriseAsPSaM(self, filenameA='AsPSa.csv', filenameM='M.csv'):
'''
This function is intended to vectorise the AsPSa and M datasets, normalise
them, combine them, construct the final tensor and export the result as
a serial. Currently a stub (not yet implemented).
'''
pass
def build(self, switches='', directory='PDBDatabase'):
if len(switches) == 20:
switch = list(switches)
if switch[0] == '1': self.Database('DATABASE', directory)
if switch[1] == '1': self.Extract(directory)
if switch[2] == '1': self.NonProtein(directory)
if switch[3] == '1': self.Size(directory, 80, 150)
if switch[4] == '1': self.Break(directory)
if switch[5] == '1': self.Loops(directory, 10)
if switch[6] == '1': self.Renumber(directory)
if switch[7] == '1': self.Rg(directory, 15)
########## --- HUMAN EYE FILTERING --- ##########
if switch[8] == '1': self.Clean(directory)
if switch[9] == '1': self.Path('PDBCleaned', '{PATH}')
if switch[10] == '1': self.RelaxHPC('~/Rosetta', 829)
if switch[11] == '1': self.Relax('PDBCleaned')
if switch[12] == '1': self.DatasetAsPSaM('PDBCleaned')
if switch[13] == '1': self.HeaderAsPSaM('AsPSa')
if switch[14] == '1':
self.HeaderAsPSaM('M')
os.remove('lengths.txt')
if switch[15] == '1':
self.Fill('AsPSa_nofill.csv')
self.Fill('M_nofill.csv')
if switch[16] == '1': self.DatasetPSCM('PDBCleaned')
if switch[17] == '1': self.C_Max('CM.csv')
if switch[18] == '1': self.VectorisePSCM()
if switch[19] == '1': self.VectoriseAsPSaM()
else: print('\x1b[31m[-] Error\x1b[33m: Wrong string length\x1b[0m')
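# Example (sketch, hypothetical switch string): run only the download,
# extraction and filtering stages, leaving relaxation and vectorisation off:
# >>> Dataset().build('11111111000000000000')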
def Vall(filename='vall.jul19.2011', m=16800, nx=1490):
'''
Compile the PDB IDs, chains, phi, psi, omega, and SASA of all the structures
from the Rosetta vall.jul19.2011 database into a .csv file
'''
assert os.path.isfile('./{}'.format(filename)),\
'Make sure the vall.jul19.2011 file is in the same directory as this script'
with open(filename, 'r') as f:
with open('Fragments.csv', 'w') as F:
header = ['PDBID,Chain']
for i in range(1, nx+1):
header.append(',AA_{},SS_{},P_{},S_{},O_{},SASA_{}'\
.format(i, i, i, i, i, i))
header = ''.join(header)
F.write(header + '\n')
for i in range(30): next(f)
ID = []
CH = []
AA = []
SS = []
P = []
S = []
O = []
SASA= []
ID_seen = set()
for line in f:
line = line.strip().split()
if line[0] not in ID_seen:
exp = []
for aa, ss, p, s, o, sasa in zip(AA, SS, P, S, O, SASA):
exp.append('{},{},{},{},{},{}'\
.format(aa, ss, p, s, o, sasa))
exp = ','.join(exp)
if exp == '': pass
else: F.write(ID + ',' + CH + ',' + exp + '\n')
ID = None
CH = None
AA = []
SS = []
P = []
S = []
O = []
SASA = []
ID_seen.add(line[0])
ID = line[0][:4].upper()
CH = line[0][-1].upper()
AA.append(line[1])
SS.append(line[2])
P.append(line[14])
S.append(line[15])
O.append(line[16])
SASA.append(line[19])
else:
ID = line[0][:4].upper()
CH = line[0][-1].upper()
AA.append(line[1])
SS.append(line[2])
P.append(line[14])
S.append(line[15])
O.append(line[16])
SASA.append(line[19])
exp = []
for aa, ss, p, s, o, sasa in zip(AA, SS, P, S, O, SASA):
exp.append('{},{},{},{},{},{}'\
.format(aa, ss, p, s, o, sasa))
exp = ','.join(exp)
F.write(ID + ',' + CH + ',' + exp)
def Frag_vectorise(filename='Fragments.csv', nx=1452):
''' Vectorises the fragments dataset, normalises it, then serialises it '''
# 1. Import data
    with open(filename) as f: rows = len(f.readlines()) - 1
    # 2. Generate a randomly shuffled list of row indices
lines = list(range(1, rows + 1))
random.shuffle(lines)
# 3. Open CSV file
with open(filename, 'r') as File: all_lines_variable = File.readlines()
PDBID, CHAIN, X, Y = [], [], [], []
for i in tqdm.tqdm(lines):
# 4. Import data line by line
line = all_lines_variable[i]
line = line.strip().split(',')
if line[0] == '1OFD': continue # Causes an error
        aa = np.array(line[2::6])
# coding: utf-8
import numpy as np
from core import get_kernel_fn
from numpy.testing import assert_allclose
from typedefs import DTYPE
from hypothesis import given, assume
from hypothesis.extra.numpy import arrays
from hypothesis.strategies import floats, integers, one_of, composite
NUMBERS = one_of(floats(allow_infinity = False, allow_nan = False), integers())
EPSILON = np.sqrt(np.finfo(float).eps) # 1.4901161193847656e-08
# KNOWN_KERNELS = {'bartlett', 'epanechnikov', 'uniform',
# 'biweight', 'quartic', 'tricube',
# 'triweight', 'cosine'}
# dists = np.array([-1, 0, 1, 2, 3], dtype = DTYPE)
# cutoff = 3
@composite
def generate_dists_cutoff(draw):
N = draw(integers(min_value = 0, max_value = 200))
dists = draw(arrays(DTYPE, (N,), elements = NUMBERS))
cutoff = draw(floats(min_value = 10 * EPSILON,
allow_infinity = False, allow_nan = False))
dists = dists[np.abs(dists) <= cutoff]
return (dists, cutoff)
@given(generate_dists_cutoff())
def test_bartlett(dists_and_cutoff):
dists, cutoff = dists_and_cutoff
kernel_fn = get_kernel_fn('bartlett')
claimed_weights = kernel_fn(dists, cutoff)
correct_weights = 1 - np.abs(dists / cutoff)
assert_allclose(claimed_weights, correct_weights, atol = 0.00001)
@given(generate_dists_cutoff())
def test_epanechnikov(dists_and_cutoff):
dists, cutoff = dists_and_cutoff
kernel_fn = get_kernel_fn('epanechnikov')
claimed_weights = kernel_fn(dists, cutoff)
correct_weights = 3 / 4 * (1 - (dists / cutoff)**2) * (np.abs(dists) < cutoff)
assert_allclose(claimed_weights, correct_weights, atol = 0.00001)
@given(generate_dists_cutoff())
def test_cosine(dists_and_cutoff):
dists, cutoff = dists_and_cutoff
kernel_fn = get_kernel_fn('cosine')
claimed_weights = kernel_fn(dists, cutoff)
    correct_weights = (np.pi / 4 * np.cos(np.pi / 2 * (dists / cutoff))) * (np.abs(dists) < cutoff)
    assert_allclose(claimed_weights, correct_weights, atol = 0.00001)
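# One more case as a sketch (assumptions: 'uniform' is accepted by
# get_kernel_fn, as the commented KNOWN_KERNELS set above suggests, and the
# uniform kernel is the constant 1/2 on |d| < cutoff).
@given(generate_dists_cutoff())
def test_uniform(dists_and_cutoff):
    dists, cutoff = dists_and_cutoff
    kernel_fn = get_kernel_fn('uniform')
    claimed_weights = kernel_fn(dists, cutoff)
    correct_weights = 0.5 * (np.abs(dists) < cutoff)
    assert_allclose(claimed_weights, correct_weights, atol = 0.00001)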
import numpy as np
from numpy import linalg as npl
from meshpy import triangle
from mesh_sphere_packing import logger, ONE_THIRD, GROWTH_LIMIT
from mesh_sphere_packing.area_constraints import AreaConstraints
# TODO : change nomenclature. Segment is used in geometry to refer to an
# : edge connecting two points. Here segment is used to refer to part
# : of a sphere surface. This is confusing...
class PSLG(object):
"""Stores geometry and topology of a Planar Straigh Line Graph."""
def __init__(self, points, edges, holes):
"""Constructs PSLG object.
:param points numpy.ndarray: array of PSLG vertex coordinates.
        :param edges numpy.ndarray: array of PSLG edges (vertex topology).
:param holes numpy.ndarray: array of coordinates of holes in the PSLG.
"""
self.points = points
self.edges = edges
self.holes = holes
class BoundaryPLC(object):
"""Stores geometry and topology of a Piecewise Linear Complex forming a domain
boundary.
"""
def __init__(self, points, tris, holes):
"""Constructs BoundaryPLC object.
:param points numpy.ndarray: array of PLC vertex coordinates.
        :param tris numpy.ndarray: array of PLC triangles (vertex topology).
:param holes numpy.ndarray: array of coordinates of holes in the PLC.
"""
self.points = points
self.tris = tris
self.holes = holes
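# A minimal illustrative construction (a sketch, not used elsewhere in this
# module): the PSLG of a unit square with no holes.
def _example_unit_square_pslg():
    points = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
    edges = np.array([[0, 1], [1, 2], [2, 3], [3, 0]])
    holes = np.empty((0, 2))
    return PSLG(points, edges, holes)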
def build_boundary_PSLGs(domain, sphere_pieces, ds):
"""Constructs PSLGs for domain boundaries. Each boundary is represented by a
Planar Straight Line Graph consisting of set of vertices and edges corresponding
to the union of all intersection loops which lie on the boundary and all boundary
perimeter vertices and edges.
:param domain Domain: spatial domain for mesh.
:param sphere_pieces list: list of SpherePiece objects.
:param ds float: characteristic segment length.
:return: list of PSLG objects for the lower bounds along each coordinate axis.
:rtype: list.
"""
# TODO : Break up this function a bit.
def compile_points_edges(sphere_pieces):
"""Produces consolidated arrays containing all SpherePiece vertices and
edges.
:param sphere_pieces list: list of SpherePiece objects.
:return: tuple of arrays of vertex coordinates and topology.
:rtype: tuple.
"""
def build_edge_list(tris, points):
v_adj = np.zeros(2*[points.shape[0]], dtype=np.int32)
v_adj[tris[:,0], tris[:,1]] = v_adj[tris[:,1], tris[:,0]] = 1
v_adj[tris[:,1], tris[:,2]] = v_adj[tris[:,2], tris[:,1]] = 1
v_adj[tris[:,2], tris[:,0]] = v_adj[tris[:,0], tris[:,2]] = 1
return np.array(np.where(np.triu(v_adj) == 1), dtype=np.int32).T
vcount = 0
all_points = []
all_edges = []
for points, tris in [(p.points, p.tris) for p in sphere_pieces]:
edges = build_edge_list(tris, points)
edges += vcount
vcount += len(points)
all_points.append(points)
all_edges.append(edges)
        return np.vstack(all_points), np.vstack(all_edges)
# Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
import warnings
from abc import abstractmethod
from typing import Optional
import networkx as nx
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_is_fitted
from ..utils import (
augment_diagonal,
import_graph,
is_almost_symmetric,
is_fully_connected,
)
from .svd import select_svd
class BaseSpectralEmbed(BaseEstimator):
"""
A base class for embedding a graph.
Parameters
----------
n_components : int or None, default = None
Desired dimensionality of output data. If "full",
n_components must be <= min(X.shape). Otherwise, n_components must be
< min(X.shape). If None, then optimal dimensions will be chosen by
``select_dimension`` using ``n_elbows`` argument.
n_elbows : int, optional, default: 2
If `n_components=None`, then compute the optimal embedding dimension using
`select_dimension`. Otherwise, ignored.
algorithm : {'full', 'truncated' (default), 'randomized'}, optional
SVD solver to use:
- 'full'
Computes full svd using ``scipy.linalg.svd``
- 'truncated'
Computes truncated svd using ``scipy.sparse.linalg.svd``
- 'randomized'
Computes randomized svd using
``sklearn.utils.extmath.randomized_svd``
n_iter : int, optional (default = 5)
Number of iterations for randomized SVD solver. Not used by 'full' or
'truncated'. The default is larger than the default in randomized_svd
to handle sparse matrices that may have large slowly decaying spectrum.
    check_lcc : bool, optional (default = True)
Whether to check if input graph is connected. May result in non-optimal
results if the graph is unconnected. Not checking for connectedness may
result in faster computation.
concat : bool, optional (default = False)
If graph(s) are directed, whether to concatenate each graph's left and right
(out and in) latent positions along axis 1.
svd_seed : int or None (default ``None``)
Only applicable for ``algorithm="randomized"``; allows you to seed the
randomized svd solver for deterministic, albeit pseudo-randomized behavior.
Attributes
----------
n_components_ : int
Dimensionality of the embedded space.
n_features_in_: int
Number of features passed to the fit method.
See Also
--------
graspologic.embed.select_svd, graspologic.embed.select_dimension
"""
def __init__(
self,
n_components=None,
n_elbows=2,
algorithm="randomized",
n_iter=5,
check_lcc=True,
concat=False,
svd_seed: Optional[int] = None,
):
self.n_components = n_components
self.n_elbows = n_elbows
self.algorithm = algorithm
self.n_iter = n_iter
self.check_lcc = check_lcc
if not isinstance(concat, bool):
msg = "Parameter `concat` is expected to be type bool"
raise TypeError(msg)
self.concat = concat
self.svd_seed = svd_seed
def _reduce_dim(self, A, directed=None):
"""
A function that reduces the dimensionality of an adjacency matrix
using the desired embedding method.
Parameters
----------
A: array-like, shape (n_vertices, n_vertices)
Adjacency matrix to embed.
"""
U, D, V = select_svd(
A,
n_components=self.n_components,
n_elbows=self.n_elbows,
algorithm=self.algorithm,
n_iter=self.n_iter,
svd_seed=self.svd_seed,
)
self.n_components_ = D.size
self.singular_values_ = D
self.latent_left_ = U @ np.diag(np.sqrt(D))
if directed is not None:
directed_ = directed
else:
directed_ = not is_almost_symmetric(A)
if directed_:
self.latent_right_ = V.T @ np.diag(np.sqrt(D))
else:
self.latent_right_ = None
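    # E.g., for a symmetric rank-k adjacency A = X @ X.T, the rows of
    # U @ np.diag(np.sqrt(D)) recover X up to an orthogonal rotation.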
@property
def _pairwise(self):
"""This is for sklearn compliance."""
return True
@abstractmethod
def fit(self, graph, y=None, *args, **kwargs):
"""
A method for embedding.
Parameters
----------
graph: np.ndarray or networkx.Graph
y : Ignored
Returns
-------
lpm : LatentPosition object
Contains X (the estimated latent positions), Y (same as X if input is
undirected graph, or right estimated positions if directed graph), and d.
See Also
--------
import_graph, LatentPosition
"""
# call self._reduce_dim(A) from your respective embedding technique.
# import graph(s) to an adjacency matrix using import_graph function
# here
return self
def _fit(self, graph, y=None):
"""
A method for embedding.
Parameters
----------
graph: np.ndarray or networkx.Graph
y : Ignored
Returns
-------
A : array-like, shape (n_vertices, n_vertices)
A graph
See Also
--------
import_graph, LatentPosition
"""
A = import_graph(graph)
if self.check_lcc:
if not is_fully_connected(A):
                msg = (
                    "Input graph is not fully connected. Results may not "
                    + "be optimal. You can compute the largest connected component "
                    + "by using ``graspologic.utils.largest_connected_component``."
                )
warnings.warn(msg, UserWarning)
self.n_features_in_ = A.shape[0]
return A
def _fit_transform(self, graph, *args, **kwargs):
"Fits the model and returns the estimated latent positions."
self.fit(graph, *args, **kwargs)
if self.latent_right_ is None:
return self.latent_left_
        else:
            if self.concat:
                return np.concatenate((self.latent_left_, self.latent_right_), axis=1)
            return self.latent_left_, self.latent_right_
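# A minimal illustrative subclass (a sketch, not part of the public API): it
# embeds the diagonally augmented adjacency matrix directly.
class _NaiveSpectralEmbed(BaseSpectralEmbed):
    def fit(self, graph, y=None):
        A = self._fit(graph)
        self._reduce_dim(augment_diagonal(A))
        return self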
import numpy as np
import cPickle
import os
class CrossValidation(object):
def __init__(self, data):
self.trainval = data
def validate(self, model, ranges, verbose):
return None
class KFoldCrossValidation(CrossValidation):
def __init__(self, data, k):
super(KFoldCrossValidation, self).__init__(data)
self.k = k
def validate(self, model, ranges, verbose=True):
print("\tPerforming {}-fold cross-validation".format(self.k))
if model.name == 'kNN':
if isinstance(ranges, xrange):
param_range = ranges
else:
raise ValueError("ranges must be an xrange instance")
accs = []
for k in param_range:
micro_acc = []
for train_data, val_data in self._get_folds():
model.fit(train_data)
predictions = model.predict(val_data['x'], k=k)
macc = np.sum(predictions == val_data['y']) / float(predictions.shape[0]) * 100.0
micro_acc.append(macc)
averaged_acc_for_k = np.mean(micro_acc)
if verbose:
print("\t\tValidation accuracy for k={1}: {0}".format(averaged_acc_for_k, k))
accs.append(averaged_acc_for_k)
            best_k = param_range[np.argmax(accs)]
print("\tBest k is: {0}".format(best_k))
return best_k
elif model.name == 'LogisticRegression':
if isinstance(ranges, list):
param_range = ranges
else:
raise ValueError("ranges must be a list instance")
accs = []
for reg in param_range:
micro_acc = []
for train_data, val_data in self._get_folds():
model.fit(train_data, num_epochs=50, reg=reg, reinit=True, verbose=False, save_best=False)
predictions = model.predict(val_data['x'])
macc = np.sum(predictions == val_data['y']) / float(predictions.shape[0]) * 100.0
micro_acc.append(macc)
averaged_acc_for_epoch = np.mean(micro_acc)
if verbose:
print("\t\tValidation accuracy for reg={1}: {0}".format(averaged_acc_for_epoch, reg))
accs.append(averaged_acc_for_epoch)
best_reg = np.argmax(accs)
print("\tBest reg is: {0}".format(param_range[best_reg]))
return param_range[best_reg]
else:
raise NotImplementedError
def _get_folds(self):
data_size = self.trainval['x_train'].shape[0]
folds = np.array_split(np.arange(data_size), self.k)
# if the last split is unbalanced, merge with the one before the last
if folds[self.k - 1].shape[0] < folds[0].shape[0] // 2:
folds[self.k - 2] = np.concatenate([folds[self.k - 2], folds[self.k - 1]])
del folds[self.k - 1]
for i in xrange(self.k):
val_idxs = folds[i]
            train_idxs = np.setdiff1d(np.arange(data_size), val_idxs)
            # assumption: features/labels live under 'x_train'/'y_train'
            yield ({'x': self.trainval['x_train'][train_idxs], 'y': self.trainval['y_train'][train_idxs]},
                   {'x': self.trainval['x_train'][val_idxs], 'y': self.trainval['y_train'][val_idxs]})
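# Example usage (a sketch; `knn` is any model object exposing the .name, .fit
# and .predict interface assumed by validate()):
#     cv = KFoldCrossValidation({'x_train': X, 'y_train': y}, k=5)
#     best_k = cv.validate(knn, xrange(1, 11))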
######################################
#
# <NAME> (C) 2017-Present
#
# <EMAIL>
#
#####################################
#
# the bottom part of this file is not by me (as is indicated below)
#
import numpy as np
from sklearn.utils import check_random_state
def circle(n,var,rs=1):
    """Returns n points on the unit circle with additive Gaussian noise of std var; rs seeds the RNG."""
    rs = check_random_state(rs)
xvec = np.linspace(0,2*np.pi,n)
X = np.zeros([n,2])
X[:,0] = np.cos(xvec) + rs.normal(0,var,n)
X[:,1] = np.sin(xvec) + rs.normal(0,var,n)
mu = np.zeros(2)
sigma = np.eye(2)
Y = rs.multivariate_normal(mu, sigma, size=n)
return X
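# e.g. X = circle(100, var=0.05) gives 100 noisy points with X.shape == (100, 2)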
######################################
#
# THE CODE BELOW IS NOT MY CODE
# SOURCE GITHUB: https://github.com/dougalsutherland/opt-mmd/blob/master/two_sample/generate.py
#####################################
def gaussian(n,corr,rs=1):
rs = check_random_state(rs)
mu = np.zeros(2)
correlation = corr
    corr_sigma = np.array([[1, correlation], [correlation, 1]])
    X = rs.multivariate_normal(mu, corr_sigma, size=n)
    # assumption: mirrors circle() above, which also returns only X
    return X
import nibabel as nib
import numpy as np
ca_net = nib.load('../source_data/CortexSubcortex_ColeAnticevic_NetPartition_wSubcorGSR_netassignments_LR.dlabel.nii')
# rois of grayordinates in the cortex
rois = ca_net.dataobj[0].astype(int)
axis0 = ca_net.header.get_index_map(0)
nmap = list(axis0.named_maps)[0]
labels = []
rgba = []
keys = []
# left hemisphere
for i in nmap.label_table:
roi = nmap.label_table[i]
labels.append(roi.label[:-4])
keys.append(roi.key)
rgba.append((roi.red, roi.green, roi.blue, roi.alpha))
# replace shorthand labels by full names from ColeAnticevicNetPartition/network_labelfile.txt
labels = ['', 'Visual1', 'Visual2', 'Somatomotor', 'Cingulo-Opercular', 'Dorsal-attention', 'Language', 'Frontoparietal', 'Auditory', 'Default', 'Posterior-Multimodal', 'Ventral-Multimodal', 'Orbito-Affective']
labels = np.array(labels)
rgba = np.array(rgba)
keys = np.array(keys)
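# Assuming the label-table keys are the consecutive integers 0..12 (so that a
# key equals its index into `labels`), each grayordinate's network name is
# simply labels[rois], e.g. labels[rois[:5]] for the first five grayordinates.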
""" Simple maze environment
"""
import numpy as np
# import cv2 #why is this needed?
from deer.base_classes import Environment
import matplotlib
#matplotlib.use('agg')
matplotlib.use('qt5agg')
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.cm as cm
from matplotlib.patches import Circle, Rectangle
from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, DrawingArea, HPacker
import copy
class MyEnv(Environment):
VALIDATION_MODE = 0
def __init__(self, rng, **kwargs):
self._mode = -1
self._mode_score = 0.0
self._mode_episode_count = 0
self._size_maze=8
self._higher_dim_obs=kwargs["higher_dim_obs"]
self.create_map()
self.intern_dim=2
def create_map(self):
self._map=np.ones((self._size_maze,self._size_maze))
self._map[-1,:]=0
self._map[0,:]=0
self._map[:,0]=0
self._map[:,-1]=0
self._map[:,self._size_maze//2]=0
self._map[self._size_maze//2,self._size_maze//2]=1
self._pos_agent=[2,2]
self._pos_goal=[self._size_maze-2,self._size_maze-2]
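        # The resulting 8x8 grid (1 = free, 0 = wall) is bordered by walls and
        # split by a wall down column 4 with a single opening at row 4:
        #   0 0 0 0 0 0 0 0
        #   0 1 1 1 0 1 1 0
        #   0 1 1 1 0 1 1 0
        #   0 1 1 1 0 1 1 0
        #   0 1 1 1 1 1 1 0
        #   0 1 1 1 0 1 1 0
        #   0 1 1 1 0 1 1 0
        #   0 0 0 0 0 0 0 0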
def reset(self, mode):
self.create_map()
self._map[self._size_maze//2,self._size_maze//2]=0
if mode == MyEnv.VALIDATION_MODE:
if self._mode != MyEnv.VALIDATION_MODE:
self._mode = MyEnv.VALIDATION_MODE
self._mode_score = 0.0
self._mode_episode_count = 0
else:
self._mode_episode_count += 1
elif self._mode != -1:
self._mode = -1
# Setting the starting position of the agent
self._pos_agent=[self._size_maze//2,self._size_maze//2]
#print ("new map:")
#print (self._map)
#print ("reset mode")
#print (mode)
return [1 * [self._size_maze * [self._size_maze * [0]]]]
def act(self, action):
"""Applies the agent action [action] on the environment.
Parameters
-----------
action : int
The action selected by the agent to operate on the environment. Should be an identifier
included between 0 included and nActions() excluded.
"""
self._cur_action=action
if(action==0):
if(self._map[self._pos_agent[0]-1,self._pos_agent[1]]==1):
self._pos_agent[0]=self._pos_agent[0]-1
elif(action==1):
if(self._map[self._pos_agent[0]+1,self._pos_agent[1]]==1):
self._pos_agent[0]=self._pos_agent[0]+1
elif(action==2):
if(self._map[self._pos_agent[0],self._pos_agent[1]-1]==1):
self._pos_agent[1]=self._pos_agent[1]-1
elif(action==3):
if(self._map[self._pos_agent[0],self._pos_agent[1]+1]==1):
self._pos_agent[1]=self._pos_agent[1]+1
# There is no reward in this simple environment
self.reward = 0
self._mode_score += self.reward
return self.reward
def summarizePerformance(self, test_data_set, learning_algo, *args, **kwargs):
""" Plot of the low-dimensional representation of the environment built by the model
"""
all_possib_inp=[] # Will store all possible inputs (=observation) for the CRAR agent
labels_maze=[]
self.create_map()
for y_a in range(self._size_maze):
for x_a in range(self._size_maze):
state=copy.deepcopy(self._map)
state[self._size_maze//2,self._size_maze//2]=0
if(state[x_a,y_a]==0):
if(self._higher_dim_obs==True):
all_possib_inp.append(self.get_higher_dim_obs([[x_a,y_a]],[self._pos_goal]))
else:
state[x_a,y_a]=0.5
all_possib_inp.append(state)
## labels
#if(y_a<self._size_maze//2):
# labels_maze.append(0.)
#elif(y_a==self._size_maze//2):
# labels_maze.append(1.)
#else:
# labels_maze.append(2.)
#arr=np.array(all_possib_inp)
#if(self._higher_dim_obs==False):
# arr=arr.reshape(arr.shape[0],-1)
#else:
# arr=arr.reshape(arr.shape[0],-1)
#
#np.savetxt('tsne_python/mazesH_X.txt',arr.reshape(arr.shape[0],-1))
#np.savetxt('tsne_python/mazesH_labels.txt',np.array(labels_maze))
all_possib_inp=np.expand_dims(np.array(all_possib_inp,dtype='float'),axis=1)
all_possib_abs_states=learning_algo.encoder.predict(all_possib_inp)
if(all_possib_abs_states.ndim==4):
all_possib_abs_states=np.transpose(all_possib_abs_states, (0, 3, 1, 2)) # data_format='channels_last' --> 'channels_first'
n=1000
historics=[]
for i,observ in enumerate(test_data_set.observations()[0][0:n]):
historics.append(np.expand_dims(observ,axis=0))
historics=np.array(historics)
abs_states=learning_algo.encoder.predict(historics)
if(abs_states.ndim==4):
abs_states=np.transpose(abs_states, (0, 3, 1, 2)) # data_format='channels_last' --> 'channels_first'
actions=test_data_set.actions()[0:n]
if self.inTerminalState() == False:
self._mode_episode_count += 1
print("== Mean score per episode is {} over {} episodes ==".format(self._mode_score / (self._mode_episode_count+0.0001), self._mode_episode_count))
m = cm.ScalarMappable(cmap=cm.jet)
x = np.array(abs_states)[:,0]
y = np.array(abs_states)[:,1]
if(self.intern_dim>2):
z = np.array(abs_states)[:,2]
fig = plt.figure()
if(self.intern_dim==2):
ax = fig.add_subplot(111)
ax.set_xlabel(r'$X_1$')
ax.set_ylabel(r'$X_2$')
else:
ax = fig.add_subplot(111,projection='3d')
ax.set_xlabel(r'$X_1$')
ax.set_ylabel(r'$X_2$')
ax.set_zlabel(r'$X_3$')
# Plot the estimated transitions
for i in range(n-1):
predicted1=learning_algo.transition.predict([abs_states[i:i+1],np.array([[1,0,0,0]])])
predicted2=learning_algo.transition.predict([abs_states[i:i+1],np.array([[0,1,0,0]])])
predicted3=learning_algo.transition.predict([abs_states[i:i+1],np.array([[0,0,1,0]])])
predicted4=learning_algo.transition.predict([abs_states[i:i+1],np.array([[0,0,0,1]])])
if(self.intern_dim==2):
ax.plot(np.concatenate([x[i:i+1],predicted1[0,:1]]), np.concatenate([y[i:i+1],predicted1[0,1:2]]), color="0.9", alpha=0.75)
ax.plot(np.concatenate([x[i:i+1],predicted2[0,:1]]), np.concatenate([y[i:i+1],predicted2[0,1:2]]), color="0.65", alpha=0.75)
ax.plot(np.concatenate([x[i:i+1],predicted3[0,:1]]), np.concatenate([y[i:i+1],predicted3[0,1:2]]), color="0.4", alpha=0.75)
ax.plot(np.concatenate([x[i:i+1],predicted4[0,:1]]), np.concatenate([y[i:i+1],predicted4[0,1:2]]), color="0.15", alpha=0.75)
else:
ax.plot(np.concatenate([x[i:i+1],predicted1[0,:1]]), np.concatenate([y[i:i+1],predicted1[0,1:2]]), np.concatenate([z[i:i+1],predicted1[0,2:3]]), color="0.9", alpha=0.75)
ax.plot(np.concatenate([x[i:i+1],predicted2[0,:1]]), np.concatenate([y[i:i+1],predicted2[0,1:2]]), np.concatenate([z[i:i+1],predicted2[0,2:3]]), color="0.65", alpha=0.75)
ax.plot(np.concatenate([x[i:i+1],predicted3[0,:1]]), np.concatenate([y[i:i+1],predicted3[0,1:2]]), np.concatenate([z[i:i+1],predicted3[0,2:3]]), color="0.4", alpha=0.75)
ax.plot(np.concatenate([x[i:i+1],predicted4[0,:1]]), np.concatenate([y[i:i+1],predicted4[0,1:2]]), np.concatenate([z[i:i+1],predicted4[0,2:3]]), color="0.15", alpha=0.75)
# Plot the dots at each time step depending on the action taken
length_block=[[0,18],[18,19],[19,31]]
for i in range(3):
colors=['blue','orange','green']
if(self.intern_dim==2):
line3 = ax.scatter(all_possib_abs_states[length_block[i][0]:length_block[i][1],0], all_possib_abs_states[length_block[i][0]:length_block[i][1],1], c=colors[i], marker='x', edgecolors='k', alpha=0.5, s=100)
else:
line3 = ax.scatter(all_possib_abs_states[length_block[i][0]:length_block[i][1],0], all_possib_abs_states[length_block[i][0]:length_block[i][1],1] ,all_possib_abs_states[length_block[i][0]:length_block[i][1],2], marker='x', depthshade=True, edgecolors='k', alpha=0.5, s=50)
if(self.intern_dim==2):
axes_lims=[ax.get_xlim(),ax.get_ylim()]
else:
axes_lims=[ax.get_xlim(),ax.get_ylim(),ax.get_zlim()]
# Plot the legend for transition estimates
box1b = TextArea(" Estimated transitions (action 0, 1, 2 and 3): ", textprops=dict(color="k"))
box2b = DrawingArea(90, 20, 0, 0)
el1b = Rectangle((5, 10), 15,2, fc="0.9", alpha=0.75)
el2b = Rectangle((25, 10), 15,2, fc="0.65", alpha=0.75)
el3b = Rectangle((45, 10), 15,2, fc="0.4", alpha=0.75)
el4b = Rectangle((65, 10), 15,2, fc="0.15", alpha=0.75)
box2b.add_artist(el1b)
box2b.add_artist(el2b)
box2b.add_artist(el3b)
box2b.add_artist(el4b)
boxb = HPacker(children=[box1b, box2b],
align="center",
pad=0, sep=5)
anchored_box = AnchoredOffsetbox(loc=3,
child=boxb, pad=0.,
frameon=True,
bbox_to_anchor=(0., 0.98),
bbox_transform=ax.transAxes,
borderpad=0.,
)
ax.add_artist(anchored_box)
#plt.show()
plt.savefig('fig_base'+str(learning_algo.update_counter)+'.pdf')
plt.close()
# # Plot the Q_vals
# c = learning_algo.Q.predict(np.concatenate((np.expand_dims(x,axis=1),np.expand_dims(y,axis=1),np.expand_dims(z,axis=1)),axis=1))
# #print "actions,C"
# #print actions
# #print c
# #c=np.max(c,axis=1)
# m1=ax.scatter(x, y, z+zrange/20, c=c[:,0], vmin=-1., vmax=1., cmap=plt.cm.RdYlGn)
# m2=ax.scatter(x, y, z+3*zrange/40, c=c[:,1], vmin=-1., vmax=1., cmap=plt.cm.RdYlGn)
#
# #plt.colorbar(m3)
# ax2 = fig.add_axes([0.85, 0.15, 0.025, 0.7])
# cmap = matplotlib.cm.RdYlGn
# norm = matplotlib.colors.Normalize(vmin=-1, vmax=1)
#
# # ColorbarBase derives from ScalarMappable and puts a colorbar
# # in a specified axes, so it has everything needed for a
# # standalone colorbar. There are many more kwargs, but the
# # following gives a basic continuous colorbar with ticks
# # and labels.
# cb1 = matplotlib.colorbar.ColorbarBase(ax2, cmap=cmap,norm=norm,orientation='vertical')
# cb1.set_label('Estimated expected return')
#
# #plt.show()
# plt.savefig('fig_w_V'+str(learning_algo.update_counter)+'.pdf')
#
#
# # fig_visuV
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
#
# x = np.array([i for i in range(5) for jk in range(25)])/4.*(axes_lims[0][1]-axes_lims[0][0])+axes_lims[0][0]
# y = np.array([j for i in range(5) for j in range(5) for k in range(5)])/4.*(axes_lims[1][1]-axes_lims[1][0])+axes_lims[1][0]
# z = np.array([k for i in range(5) for j in range(5) for k in range(5)])/4.*(axes_lims[2][1]-axes_lims[2][0])+axes_lims[2][0]
#
# c = learning_algo.Q.predict(np.concatenate((np.expand_dims(x,axis=1),np.expand_dims(y,axis=1),np.expand_dims(z,axis=1)),axis=1))
# c=np.max(c,axis=1)
# #print "c"
# #print c
#
# m=ax.scatter(x, y, z, c=c, vmin=-1., vmax=1., cmap=plt.hot())
# #plt.colorbar(m)
# fig.subplots_adjust(right=0.8)
# ax2 = fig.add_axes([0.875, 0.15, 0.025, 0.7])
# cmap = matplotlib.cm.hot
# norm = matplotlib.colors.Normalize(vmin=-1, vmax=1)
#
# # ColorbarBase derives from ScalarMappable and puts a colorbar
# # in a specified axes, so it has everything needed for a
# # standalone colorbar. There are many more kwargs, but the
# # following gives a basic continuous colorbar with ticks
# # and labels.
# cb1 = matplotlib.colorbar.ColorbarBase(ax2, cmap=cmap,norm=norm,orientation='vertical')
# cb1.set_label('Estimated expected return')
#
# #plt.show()
# plt.savefig('fig_visuV'+str(learning_algo.update_counter)+'.pdf')
#
#
# # fig_visuR
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
#
# x = np.array([i for i in range(5) for jk in range(25)])/4.*(axes_lims[0][1]-axes_lims[0][0])+axes_lims[0][0]
# y = np.array([j for i in range(5) for j in range(5) for k in range(5)])/4.*(axes_lims[1][1]-axes_lims[1][0])+axes_lims[1][0]
# z = np.array([k for i in range(5) for j in range(5) for k in range(5)])/4.*(axes_lims[2][1]-axes_lims[2][0])+axes_lims[2][0]
#
# coords=np.concatenate((np.expand_dims(x,axis=1),np.expand_dims(y,axis=1),np.expand_dims(z,axis=1)),axis=1)
# repeat_nactions_coord=np.repeat(coords,self.nActions(),axis=0)
# identity_matrix = np.diag(np.ones(self.nActions()))
# tile_identity_matrix=np.tile(identity_matrix,(5*5*5,1))
#
# c = learning_algo.R.predict([repeat_nactions_coord,tile_identity_matrix])
# c=np.max(np.reshape(c,(125,self.nActions())),axis=1)
# #print "c"
# #print c
# #mini=np.min(c)
# #maxi=np.max(c)
#
# m=ax.scatter(x, y, z, c=c, vmin=-1., vmax=1., cmap=plt.hot())
# #plt.colorbar(m)
# fig.subplots_adjust(right=0.8)
# ax2 = fig.add_axes([0.875, 0.15, 0.025, 0.7])
# cmap = matplotlib.cm.hot
# norm = matplotlib.colors.Normalize(vmin=-1, vmax=1)
#
# # ColorbarBase derives from ScalarMappable and puts a colorbar
# # in a specified axes, so it has everything needed for a
# # standalone colorbar. There are many more kwargs, but the
# # following gives a basic continuous colorbar with ticks
# # and labels.
# cb1 = matplotlib.colorbar.ColorbarBase(ax2, cmap=cmap,norm=norm,orientation='vertical')
# cb1.set_label('Estimated expected return')
#
# #plt.show()
# plt.savefig('fig_visuR'+str(learning_algo.update_counter)+'.pdf')
matplotlib.pyplot.close("all") # avoids memory leaks
def inputDimensions(self):
if(self._higher_dim_obs==True):
return [(1,self._size_maze*6,self._size_maze*6)]
else:
return [(1,self._size_maze,self._size_maze)]
def observationType(self, subject):
        return float  # np.float is a deprecated alias of the builtin float
def nActions(self):
return 4
def observe(self):
obs=copy.deepcopy(self._map)
obs[self._pos_agent[0],self._pos_agent[1]]=0.5
        if(self._higher_dim_obs==True):
            obs=self.get_higher_dim_obs([self._pos_agent],[self._pos_goal])
# plt.imshow(obs, cmap='gray_r')
# plt.show()
return [obs]
def get_higher_dim_obs(self,indices_agent,indices_reward):
""" Obtain the high-dimensional observation from indices of the agent position and the indices of the reward positions.
"""
obs=copy.deepcopy(self._map)
obs=obs/1.
obs=np.repeat(np.repeat(obs, 6, axis=0),6, axis=1)
#TODO for later experiment change agent obs
# agent repr
        agent_obs = np.zeros((6,6))
# coding: utf-8
import numpy as np
'''Handles analytically deconvolving two Gaussians and finding a common beam.'''
def quadratic2elliptic(A,B,C,D=0,E=0,F=-np.log(2)):
"""Invert:
(A0 cos^2 phi + c0 sin^2 phi)k = A
(A0-C0)sin 2phi k = B
(a0 sin^2 phi + c0 cos^2 phi) k = C
returns bmaj,bmin,bpa[,xc,y if D,E != 0]"""
    if (np.isinf(A)):
        # assumption: treat non-finite quadratic coefficients as degenerate input
        return None
import numpy as np
from Space import Space
class SyntheticFunction:
def __init__(self, dim):
self.numCalls = 0
self.input_dim = dim
self.nbounds = [(0, 1)] * self.input_dim
def plot(self):
print("not implemented")
def rand_in_bounds(self):
rand = []
for i in self.bounds:
tmpRand = np.random.uniform(i[0], i[1])
rand.append(tmpRand)
return rand
def rand_uniform_in_bounds(self):
rand = []
for i in self.bounds:
tmp_rand = np.random.uniform(i[0], i[1])
rand.append(tmp_rand)
return rand
def rand_uniform_in_nbounds(self):
rand = []
for i in range(0, self.input_dim):
tmp_rand = np.random.uniform(self.nbounds[i][0], self.nbounds[i][1])
rand.append(tmp_rand)
return rand
def normalize(self, x):
val = []
for i in range(0, self.input_dim):
val.append((x[i] - self.bounds[i][0])/(self.bounds[i][1] - self.bounds[i][0]))
return val
def denormalize(self, x):
val = []
for i in range(0, self.input_dim):
val.append(1.0 * (x[i] * (self.bounds[i][1] - self.bounds[i][0]) + self.bounds[i][0]))
return val
class Eggholder(SyntheticFunction):
def __init__(self, dim, missing_rate, missing_noise, is_random_missing=False):
SyntheticFunction.__init__(self, 2) # Constructor of the parent class
self.input_dim = 2
self.numCalls = 0
# self.bounds = OrderedDict({'x1': (-5, 10), 'x2': (0, 15)})
self.bounds = [(-512, 512), (-512, 512)]
self.nbounds = [(0, 1), (0, 1)]
self.min = [512, 404.2319]
self.fmin = -959.6407
self.ismax = -1
self.name = 'Eggholder'
self.discreteIdx = []
self.categoricalIdx = []
self.miss_rate = missing_rate
self.miss_noise = missing_noise
self.is_random_missing = is_random_missing
def func(self, X):
self.numCalls += 1
        X = np.asarray(X)
        # standard Eggholder form (assumption: missing-value noise handling omitted)
        fval = -(X[1] + 47.0) * np.sin(np.sqrt(abs(X[1] + X[0] / 2.0 + 47.0))) \
            - X[0] * np.sin(np.sqrt(abs(X[0] - (X[1] + 47.0))))
        return self.ismax * fval
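# Sanity check (a sketch): evaluating at the known optimum should return
# roughly self.ismax * self.fmin, i.e. about +959.64 with ismax = -1:
#     f = Eggholder(2, missing_rate=0.0, missing_noise=0.0)
#     print(f.func(f.min))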
#!/usr/bin/env python
import sys
import rospy
from std_msgs.msg import String
from sensor_msgs.msg import Image
from geometry_msgs.msg import Pose, PoseStamped, Point, Point32, PolygonStamped
from cv_bridge import CvBridge, CvBridgeError
from scipy import ndimage, signal
import argparse
import os
import sys
import math
import numpy as np
import copy
import transforms3d as tf3d
import json
import copy
import keras
import tensorflow as tf
import open3d
import ros_numpy
#print(sys.path)
#sys.path.remove('/opt/ros/melodic/lib/python2.7/dist-packages')
import cv2
from PIL import Image as Pilimage
sys.path.append("/RGBDPose")
from RGBDPose import models
from RGBDPose.utils.config import read_config_file, parse_anchor_parameters
from RGBDPose.utils.eval import evaluate
from RGBDPose.utils.keras_version import check_keras_version
from RGBDPose.utils import ply_loader
from object_detector_msgs.srv import get_poses, get_posesResponse
from object_detector_msgs.msg import PoseWithConfidence
from geometry_msgs.msg import PoseArray, Pose
###################################
##### Global Variable Space #######
######## aka. death zone ##########
###################################
# LineMOD
#fxkin = 572.41140
#fykin = 573.57043
#cxkin = 325.26110
#cykin = 242.04899
# YCB-video
#fxkin = 1066.778
#fykin = 1067.487
#cxkin = 312.9869
#cykin = 241.3109
# our Kinect
#fxkin = 575.81573
#fykin = 575.81753
#cxkin = 314.5
#cykin = 235.5
# HSRB
# fxkin = 538.391033
# fykin = 538.085452
# cxkin = 315.30747
# cykin = 233.048356
# magic intrinsics
fxkin = 1066.778
fykin = 1067.487
cxkin = 320.0
cykin = 240.0
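# Small helper illustrating how the intrinsics above are used (an added
# sketch, not called elsewhere): back-project pixel (u, v) at depth z into
# camera coordinates with the pinhole model.
def backproject(u, v, z):
    x = (u - cxkin) * z / fxkin
    y = (v - cykin) * z / fykin
    return np.array([x, y, z])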
def get_evaluation_kiru(pcd_temp_,pcd_scene_,inlier_thres,tf,final_th, model_dia):#queue
    tf_pcd = np.eye(4)
pcd_temp_.transform(tf)
mean_temp = np.mean(np.array(pcd_temp_.points)[:, 2])
    mean_scene = np.median(np.array(pcd_scene_.points)[:, 2])
from unittest import TestCase
import numpy as np
from giant import rotations as at
class TestAttitude(TestCase):
def check_attitude(self, attitude, quaternion, mupdate, vupdate):
np.testing.assert_array_almost_equal(quaternion, attitude.q)
np.testing.assert_array_almost_equal(quaternion[:3], attitude.q_vector)
self.assertAlmostEqual(quaternion[-1], attitude.q_scalar)
self.assertIs(attitude._mupdate, mupdate)
self.assertIs(attitude._vupdate, vupdate)
def test_init(self):
att = at.Rotation()
self.check_attitude(att, [0, 0, 0, 1], True, True)
att = at.Rotation([0, 0, 0, 1])
self.check_attitude(att, [0, 0, 0, 1], True, True)
att = at.Rotation(data=[0, 0, 0, 1])
self.check_attitude(att, [0, 0, 0, 1], True, True)
att = at.Rotation(np.eye(3))
self.check_attitude(att, [0, 0, 0, 1], False, True)
att = at.Rotation([0, 0, 0])
self.check_attitude(att, [0, 0, 0, 1], True, False)
att = at.Rotation([np.sqrt(2) / 2, 0, 0, np.sqrt(2) / 2])
self.check_attitude(att, [np.sqrt(2)/2, 0, 0, np.sqrt(2)/2], True, True)
att = at.Rotation([np.sqrt(2) / 2, 0, 0, -np.sqrt(2) / 2])
self.check_attitude(att, [-np.sqrt(2)/2, 0, 0, np.sqrt(2)/2], True, True)
att2 = att
att = at.Rotation(att2)
self.check_attitude(att, [-np.sqrt(2)/2, 0, 0, np.sqrt(2)/2], True, True)
self.assertIs(att, att2)
# with self.assertWarns(UserWarning):
#
# at.Rotation([1, 2, 3, 4])
def test_quaternion_setter(self):
att = at.Rotation()
att._mupdate = False
att._vupdate = False
att.quaternion = [np.sqrt(2)/2, 0, 0, np.sqrt(2)/2]
self.check_attitude(att, [np.sqrt(2)/2, 0, 0, np.sqrt(2)/2], True, True)
att.quaternion = [np.sqrt(2)/2, 0, 0, -np.sqrt(2)/2]
self.check_attitude(att, [-np.sqrt(2)/2, 0, 0, np.sqrt(2)/2], True, True)
att2 = at.Rotation([1, 2, 3, 4])
att.quaternion = att2
self.check_attitude(att, np.array([1, 2, 3, 4])/np.sqrt(30), True, True)
self.assertIsNot(att, att2)
# with self.assertWarns(UserWarning):
# att.quaternion = [1, 2, 3, 4]
#
# self.check_attitude(att, np.array([1, 2, 3, 4])/np.sqrt(30), True, True)
with self.assertRaises(ValueError):
att.quaternion = np.eye(4)
def test_matrix_getter(self):
att = at.Rotation([np.sqrt(2) / 2, 0, 0, np.sqrt(2) / 2])
np.testing.assert_array_almost_equal([[1, 0, 0], [0, 0, -1], [0, 1, 0]], att.matrix)
np.testing.assert_array_almost_equal([[1, 0, 0], [0, 0, -1], [0, 1, 0]], att._matrix)
self.assertFalse(att._mupdate)
# this is bad and you should never do this but it checks that the caching is working
att._matrix = np.eye(3)
np.testing.assert_array_equal(att.matrix, np.eye(3))
self.check_attitude(att, [np.sqrt(2)/2, 0, 0, np.sqrt(2)/2], False, True)
def test_matrix_setter(self):
att = at.Rotation([1, 2, 3])
att.matrix = np.eye(3)
self.check_attitude(att, [0, 0, 0, 1], False, True)
np.testing.assert_array_equal(att._matrix, np.eye(3))
with self.assertRaises(ValueError):
att.matrix = [1, 2, 3]
def test_vector_getter(self):
att = at.Rotation([np.sqrt(2) / 2, 0, 0, np.sqrt(2) / 2])
np.testing.assert_array_almost_equal(att.vector, [np.pi/2, 0, 0])
np.testing.assert_array_almost_equal(att._vector, [np.pi/2, 0, 0])
self.assertFalse(att._vupdate)
# this is bad and you should never do this but it checks that the caching is working
att._vector = [1, 2, 3]
np.testing.assert_array_equal(att.vector, [1, 2, 3])
self.check_attitude(att, [np.sqrt(2)/2, 0, 0, np.sqrt(2)/2], True, False)
def test_vector_setter(self):
att = at.Rotation([np.sqrt(2) / 2, 0, 0, np.sqrt(2) / 2])
att.vector = [1, 2, 3]
self.check_attitude(att, [-0.25532186, -0.51064372, -0.76596558, 0.29555113], True, False)
np.testing.assert_array_equal(att.vector, [1, 2, 3])
with self.assertRaises(ValueError):
att.vector = np.eye(3)
def test_inv(self):
att = at.Rotation([np.sqrt(2) / 2, 0, 0, np.sqrt(2) / 2])
attinv = att.inv()
self.check_attitude(attinv, [-np.sqrt(2)/2, 0, 0, np.sqrt(2)/2], True, True)
self.check_attitude(att, [np.sqrt(2)/2, 0, 0, np.sqrt(2)/2], True, True)
def test_interp_attitude(self):
att = at.Rotation()
att.interp_attitude([1, 2, 3])
self.check_attitude(att, [-0.25532186, -0.51064372, -0.76596558, 0.29555113], True, False)
np.testing.assert_array_equal(att._vector, [1, 2, 3])
att.interp_attitude(np.eye(3))
self.check_attitude(att, [0, 0, 0, 1], False, True)
np.testing.assert_array_equal(att._matrix, np.eye(3))
att.interp_attitude([np.sqrt(2)/2, 0, 0, np.sqrt(2)/2])
self.check_attitude(att, [np.sqrt(2)/2, 0, 0, np.sqrt(2)/2], True, True)
att2 = at.Rotation([np.sqrt(2) / 2, 0, 0, np.sqrt(2) / 2])
att.interp_attitude(att2)
self.check_attitude(att, [np.sqrt(2)/2, 0, 0, np.sqrt(2)/2], True, True)
self.assertIsNot(att, att2)
with self.assertRaises(ValueError):
att.interp_attitude([1, 2])
def test_eq(self):
att = at.Rotation()
self.assertTrue(att == at.Rotation())
self.assertTrue(att == [0, 0, 0, 1])
self.assertTrue(att == np.eye(3))
self.assertTrue(att == [0, 0, 0])
def test_mul(self):
att = at.Rotation([1, 2, 3])
att2 = att.inv()
self.check_attitude(att*att2, [0, 0, 0, 1], True, True)
with self.assertRaises(TypeError):
_ = att*[0, 0, 0, 1]
with self.assertRaises(TypeError):
_ = [0, 0, 0, 1]*att
# def test_imul(self):
# att = at.Rotation()
# with self.assertWarns(DeprecationWarning):
#
# att *= [1, 0, 0, 0]
#
# self.check_attitude(att, [1, 0, 0, 0], True, True)
def test_rotate(self):
att = at.Rotation()
att.rotate([1, 0, 0, 0])
self.check_attitude(att, [1, 0, 0, 0], True, True)
class TestQuaternionInverse(TestCase):
def test_quaternion_inverse(self):
qinv = at.quaternion_inverse([1, 2, 3, 4])
np.testing.assert_array_equal(qinv, [-1, -2, -3, 4])
qinv = at.quaternion_inverse(at.Rotation([1, 2, 3, 4]))
np.testing.assert_array_almost_equal(qinv.q.flatten(), np.array([-1, -2, -3, 4])/np.sqrt(30))
qinv = at.quaternion_inverse([[1, 2], [2, 3], [3, 4], [4, 5]])
np.testing.assert_array_equal(qinv.T, [[-1, -2, -3, 4], [-2, -3, -4, 5]])
class TestQuaternionMultiplication(TestCase):
def test_quaternion_multiplication(self):
quat_1 = [1, 0, 0, 0]
quat_2 = [0, 1, 0, 0]
qm = at.quaternion_multiplication(quat_1, quat_2)
np.testing.assert_array_equal(np.abs(qm), [0, 0, 1, 0])
quat_1 = [[1], [0], [0], [0]]
quat_2 = [[0], [1], [0], [0]]
qm = at.quaternion_multiplication(quat_1, quat_2)
np.testing.assert_array_equal(np.abs(qm), [[0], [0], [1], [0]])
quat_1 = [[1, 0], [0, 1], [0, 0], [0, 0]]
quat_2 = [[0, 0], [1, 1], [0, 0], [0, 0]]
qm = at.quaternion_multiplication(quat_1, quat_2)
np.testing.assert_array_equal(np.abs(qm), [[0, 0], [0, 0], [1, 0], [0, 1]])
quat_1 = at.Rotation([1, 0, 0, 0])
quat_2 = at.Rotation([0, 0, 1, 0])
qm = at.quaternion_multiplication(quat_1, quat_2)
np.testing.assert_array_equal(np.abs(qm.q), [0, 1, 0, 0])
quat_1 = [np.sqrt(2)/2, 0, 0, np.sqrt(2)/2] # x=x, y=z, z=-y
quat_2 = [0, np.sqrt(2)/2, 0, np.sqrt(2)/2] # x=-z, y=y, z=x
qm = at.quaternion_multiplication(quat_1, quat_2)
np.testing.assert_array_almost_equal(np.abs(qm), [0.5, 0.5, 0.5, 0.5])
quat_1 = [0.25532186, 0.51064372, 0.76596558, -0.29555113]
quat_2 = [-0.43199286, -0.53999107, -0.64798929, -0.31922045]
qm = at.quaternion_multiplication(quat_1, quat_2)
# truth comes from matrix rotations
np.testing.assert_array_almost_equal(qm, [0.12889493, -0.16885878, 0.02972499, 0.97672373])
class TestQuaternionToRotVec(TestCase):
def test_quaternion_to_rotvec(self):
rvec = at.quaternion_to_rotvec([1, 0, 0, 0])
np.testing.assert_array_almost_equal(rvec, [np.pi, 0, 0])
rvec = at.quaternion_to_rotvec(at.Rotation([-1, 0, 0, 0]))
np.testing.assert_array_almost_equal(rvec, [-np.pi, 0, 0])
rvec = at.quaternion_to_rotvec([0, 1, 0, 0])
np.testing.assert_array_almost_equal(rvec, [0, np.pi, 0])
rvec = at.quaternion_to_rotvec([0, -1, 0, 0])
np.testing.assert_array_almost_equal(rvec, [0, -np.pi, 0])
rvec = at.quaternion_to_rotvec([0, 0, 1, 0])
np.testing.assert_array_almost_equal(rvec, [0, 0, np.pi])
rvec = at.quaternion_to_rotvec([0, 0, -1, 0])
np.testing.assert_array_almost_equal(rvec, [0, 0, -np.pi])
rvec = at.quaternion_to_rotvec([0, 0, 0, 1])
np.testing.assert_array_almost_equal(rvec, [0, 0, 0])
rvec = at.quaternion_to_rotvec([0, 0, 0, -1])
np.testing.assert_array_almost_equal(rvec, [0, 0, 0])
rvec = at.quaternion_to_rotvec([0.25532186, 0.51064372, 0.76596558, -0.29555113])
np.testing.assert_array_almost_equal(rvec, [1, 2, 3])
rvec = at.quaternion_to_rotvec([-0.25532186, -0.51064372, -0.76596558, 0.29555113])
# euler axis is not unique
np.testing.assert_array_almost_equal(rvec, np.array([1, 2, 3])*(1-2*np.pi/np.sqrt(14)))
rvec = at.quaternion_to_rotvec([[0.25532186], [0.51064372], [0.76596558], [-0.29555113]])
np.testing.assert_array_almost_equal(rvec, [[1], [2], [3]])
rvec = at.quaternion_to_rotvec([[1, 0, 0.25532186, 0],
[0, 0, 0.51064372, 0],
[0, 0, 0.76596558, 0],
[0, 1, -0.29555113, -1]])
np.testing.assert_array_almost_equal(rvec, [[np.pi, 0, 1, 0], [0, 0, 2, 0], [0, 0, 3, 0]])
class TestQuaternionToRotMat(TestCase):
def test_quaternion_to_rotmat(self):
rotmat = at.quaternion_to_rotmat([0, 0, 0, 1])
np.testing.assert_array_almost_equal(rotmat, np.eye(3))
rotmat = at.quaternion_to_rotmat([[0], [0], [0], [1]])
np.testing.assert_array_almost_equal(rotmat, np.eye(3))
rotmat = at.quaternion_to_rotmat([[0, 1], [0, 0], [0, 0], [1, 0]])
np.testing.assert_array_almost_equal(rotmat, [np.eye(3), [[1, 0, 0], [0, -1, 0], [0, 0, -1]]])
rotmat = at.quaternion_to_rotmat(at.Rotation([0, 1, 0, 0]))
np.testing.assert_array_almost_equal(rotmat, [[-1, 0, 0], [0, 1, 0], [0, 0, -1]])
rotmat = at.quaternion_to_rotmat([0, 0, np.sqrt(2)/2, np.sqrt(2)/2])
np.testing.assert_array_almost_equal(rotmat, [[0, -1, 0], [1, 0, 0], [0, 0, 1]])
rotmat = at.quaternion_to_rotmat([-0.25532186, -0.51064372, -0.76596558, 0.29555113])
np.testing.assert_array_almost_equal(rotmat, [[-0.69492056, 0.71352099, 0.08929286],
[-0.19200697, -0.30378504, 0.93319235],
[0.69297817, 0.6313497, 0.34810748]])
class TestQuaternionToEuler(TestCase):
def test_quaternion_to_euler(self):
orders = ['xyz', 'zxy', 'yxz', 'yzx', 'xzy', 'zyx', 'xyx', 'yxy', 'xzx', 'zxz', 'yzy', 'zyz']
angles = [[np.pi/3, np.pi/3, 0], [0, np.pi/3, np.pi/3],
[np.pi/3, np.pi/3, np.pi/3],
[-np.pi/3, -np.pi/3, 0], [0, -np.pi/3, -np.pi/3],
[-np.pi/3, -np.pi/3, -np.pi/3],
[1, 2, 3], [1, -2, 3],
[[1, 2, 3, 1], [2, 3, 1, 2], [3, 1, 2, 3]]]
for angle in angles:
for order in orders:
with self.subTest(angle=angle, order=order):
rmat = at.euler_to_rotmat(angle, order=order)
quat = at.rotmat_to_quaternion(rmat)
euler = at.quaternion_to_euler(quat, order=order)
rmat2 = at.euler_to_rotmat(euler, order=order)
quat2 = at.rotmat_to_quaternion(rmat2)
np.testing.assert_almost_equal(quat, quat2)
class TestRotVecToRotMat(TestCase):
def test_rotvec_to_rotmat(self):
rotmat = at.rotvec_to_rotmat([0, 0, 0])
np.testing.assert_array_almost_equal(rotmat, [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
rotmat = at.rotvec_to_rotmat([[0, 0], [0, 0], [0, 0]])
np.testing.assert_array_almost_equal(rotmat, [[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1]]])
rotmat = at.rotvec_to_rotmat([np.pi, 0, 0])
np.testing.assert_array_almost_equal(rotmat, [[1, 0, 0], [0, -1, 0], [0, 0, -1]])
rotmat = at.rotvec_to_rotmat([0, np.pi, 0])
np.testing.assert_array_almost_equal(rotmat, [[-1, 0, 0], [0, 1, 0], [0, 0, -1]])
rotmat = at.rotvec_to_rotmat([0, 0, np.pi])
np.testing.assert_array_almost_equal(rotmat, [[-1, 0, 0], [0, -1, 0], [0, 0, 1]])
rotmat = at.rotvec_to_rotmat([[np.pi, 0, 0], [0, np.pi, 0], [0, 0, -np.pi]])
np.testing.assert_array_almost_equal(rotmat, [[[1, 0, 0], [0, -1, 0], [0, 0, -1]],
[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],
[[-1, 0, 0], [0, -1, 0], [0, 0, 1]]])
rotmat = at.rotvec_to_rotmat([[np.pi / 2, 0], [0, -np.pi / 2], [0, 0]])
np.testing.assert_array_almost_equal(rotmat, [[[1, 0, 0], [0, 0, -1], [0, 1, 0]],
[[0, 0, -1], [0, 1, 0], [1, 0, 0]]])
rotmat = at.rotvec_to_rotmat([[np.pi / 2, 0, 0], [0, 0, -np.pi / 2], [0, 0, 0]])
np.testing.assert_array_almost_equal(rotmat, [[[1, 0, 0], [0, 0, -1], [0, 1, 0]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[0, 0, -1], [0, 1, 0], [1, 0, 0]]])
rotmat = at.rotvec_to_rotmat([1, 2, 3])
np.testing.assert_array_almost_equal(rotmat, [[-0.69492056, 0.71352099, 0.08929286],
[-0.19200697, -0.30378504, 0.93319235],
[0.69297817, 0.6313497, 0.34810748]])
class TestRotVecToQuaternion(TestCase):
def test_rotvec_to_quaternion(self):
q = at.rotvec_to_quaternion([0, 0, 0])
np.testing.assert_array_almost_equal(q, [0, 0, 0, 1])
q = at.rotvec_to_quaternion([[0], [0], [0]])
np.testing.assert_array_almost_equal(q, [[0], [0], [0], [1]])
q = at.rotvec_to_quaternion([np.pi, 0, 0])
np.testing.assert_array_almost_equal(q, [1, 0, 0, 0])
q = at.rotvec_to_quaternion([0, np.pi, 0])
np.testing.assert_array_almost_equal(q, [0, 1, 0, 0])
q = at.rotvec_to_quaternion([0, 0, np.pi])
np.testing.assert_array_almost_equal(q, [0, 0, 1, 0])
q = at.rotvec_to_quaternion([1, 2, 3])
np.testing.assert_array_almost_equal(q, [0.25532186, 0.51064372, 0.76596558, -0.29555113])
q = at.rotvec_to_quaternion([[0], [0], [np.pi]])
np.testing.assert_array_almost_equal(q, [[0], [0], [1], [0]])
q = at.rotvec_to_quaternion([[np.pi, 0, 1, 0],
[0, 0, 2, 0],
[0, 0, 3, 0]])
np.testing.assert_array_almost_equal(q, [[1, 0, 0.25532186, 0],
[0, 0, 0.51064372, 0],
[0, 0, 0.76596558, 0],
[0, 1, -0.29555113, 1]])
class TestRotMatToQuaternion(TestCase):
def test_rotmat_to_quaternion(self):
q = at.rotmat_to_quaternion(np.eye(3))
np.testing.assert_allclose(q, [0, 0, 0, 1], atol=1e-16)
# figure out how to account for the fact that these can be positive or negative
q = at.rotmat_to_quaternion(np.array([[-1., 0, 0], [0, 1, 0], [0, 0, -1]]))
np.testing.assert_allclose(np.abs(q), [0, 1, 0, 0], atol=1e-16)
q = at.rotmat_to_quaternion(np.array([[1., 0, 0], [0, -1, 0], [0, 0, -1]]))
np.testing.assert_allclose(np.abs(q), [1, 0, 0, 0], atol=1e-16)
q = at.rotmat_to_quaternion(np.array([[-1., 0, 0], [0, -1, 0], [0, 0, 1]]))
np.testing.assert_allclose(np.abs(q), [0, 0, 1, 0], atol=1e-16)
q = at.rotmat_to_quaternion([np.eye(3)]*2)
np.testing.assert_allclose(q.T, [[0, 0, 0, 1]]*2, atol=1e-16)
q = at.rotmat_to_quaternion([[0, 1, 0], [-1, 0, 0], [0, 0, 1]])
np.testing.assert_allclose(q, [0, 0, -np.sqrt(2)/2, np.sqrt(2)/2], atol=1e-16)
q = at.rotmat_to_quaternion([[-0.69492056, -0.19200697, 0.69297817],
[0.71352099, -0.30378504, 0.6313497],
[0.08929286, 0.93319235, 0.34810748]])
np.testing.assert_allclose(q, [0.25532186, 0.51064372, 0.76596558, 0.29555113], atol=1e-16)
q = at.rotmat_to_quaternion([[[-0.69492056, -0.19200697, 0.69297817],
[0.71352099, -0.30378504, 0.6313497],
[0.08929286, 0.93319235, 0.34810748]],
np.eye(3)])
np.testing.assert_allclose(q.T, [[0.25532186, 0.51064372, 0.76596558, 0.29555113],
[0, 0, 0, 1]], atol=1e-16)
with self.assertRaises(ValueError):
at.rotmat_to_quaternion([1, 2, 3])
with self.assertRaises(ValueError):
at.rotmat_to_quaternion([[1, 2, 3]])
with self.assertRaises(ValueError):
at.rotmat_to_quaternion([[1], [2], [3]])
with self.assertRaises(ValueError):
at.rotmat_to_quaternion([1, 2, 3, 4])
with self.assertRaises(ValueError):
at.rotmat_to_quaternion([[1, 2, 3, 4]])
with self.assertRaises(ValueError):
at.rotmat_to_quaternion([[1], [2], [3], [4]])
class TestRotMatToEuler(TestCase):
def test_rotmat_to_euler(self):
orders = ['xyz', 'zxy', 'yxz', 'yzx', 'xzy', 'zyx', 'xyx', 'yxy', 'xzx', 'zxz', 'yzy', 'zyz']
angles = [[np.pi/3, np.pi/3, 0], [0, np.pi/3, np.pi/3],
[np.pi/3, np.pi/3, np.pi/3],
[-np.pi/3, -np.pi/3, 0], [0, -np.pi/3, -np.pi/3],
[-np.pi/3, -np.pi/3, -np.pi/3],
[1, 2, 3], [1, -2, 3],
[[1, 2, 3, 1], [2, 3, 1, 2], [3, 1, 2, 3]]]
for angle in angles:
for order in orders:
with self.subTest(angle=angle, order=order):
rmat = at.euler_to_rotmat(angle, order=order)
euler = at.rotmat_to_euler(rmat, order=order)
rmat2 = at.euler_to_rotmat(euler, order=order)
np.testing.assert_almost_equal(rmat, rmat2)
class TestEulerToRotMat(TestCase):
def test_euler_to_rotmat(self):
orders = ['xyz', 'zxy', 'yxz', 'yzx', 'xzy', 'zyx', 'xyx', 'yxy', 'xzx', 'zxz', 'yzy', 'zyz']
angles = [[np.pi/3, 0, 0], [0, np.pi/3, 0], [0, 0, np.pi/3],
[np.pi/3, np.pi/3, 0], [0, np.pi/3, np.pi/3],
[np.pi/3, np.pi/3, np.pi/3],
[-np.pi/3, -np.pi/3, 0], [0, -np.pi/3, -np.pi/3],
[-np.pi/3, -np.pi/3, -np.pi/3],
[1, 2, 3], [1, -2, 3],
[[1, 2, 3, 1], [2, 3, 1, 2], [3, 1, 2, 3]]]
for angle in angles:
for order in orders:
with self.subTest(angle=angle, order=order):
rmat = at.euler_to_rotmat(angle, order=order)
rmat2 = np.eye(3)
for an, ax in zip(angle, order):
if ax.upper().lower() == 'x':
update = at.rot_x(an)
elif ax.upper().lower() == 'y':
update = at.rot_y(an)
elif ax.upper().lower() == 'z':
update = at.rot_z(an)
rmat2 = update @ rmat2
np.testing.assert_almost_equal(rmat, rmat2)
class TestRotX(TestCase):
def test_rot_x(self):
angles = [3*np.pi/2, np.pi, np.pi/2, np.pi/3, 0,
-3*np.pi/2, -np.pi, -np.pi/2, -np.pi/3,
[0, np.pi/2, np.pi/3],
[0, -np.pi/2, -np.pi/3]]
mats = [[[1, 0, 0], [0, 0, 1], [0, -1, 0]],
[[1, 0, 0], [0, -1, 0], [0, 0, -1]],
[[1, 0, 0], [0, 0, -1], [0, 1, 0]],
[[1, 0, 0], [0, 0.5, -np.sqrt(3)/2], [0, np.sqrt(3)/2, 0.5]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[1, 0, 0], [0, 0, -1], [0, 1, 0]],
[[1, 0, 0], [0, -1, 0], [0, 0, -1]],
[[1, 0, 0], [0, 0, 1], [0, -1, 0]],
[[1, 0, 0], [0, 0.5, np.sqrt(3)/2], [0, -np.sqrt(3)/2, 0.5]],
[[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[1, 0, 0], [0, 0, -1], [0, 1, 0]],
[[1, 0, 0], [0, 0.5, -np.sqrt(3)/2], [0, np.sqrt(3)/2, 0.5]]],
[[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[1, 0, 0], [0, 0, 1], [0, -1, 0]],
[[1, 0, 0], [0, 0.5, np.sqrt(3)/2], [0, -np.sqrt(3)/2, 0.5]]]
]
for angle, solu in zip(angles, mats):
with self.subTest(angle=angle):
rmat = at.rot_x(angle)
np.testing.assert_almost_equal(rmat, solu)
class TestRotY(TestCase):
def test_rot_y(self):
angles = [3*np.pi/2, np.pi, np.pi/2, np.pi/3, 0,
-3*np.pi/2, -np.pi, -np.pi/2, -np.pi/3,
[0, np.pi/2, np.pi/3],
[0, -np.pi/2, -np.pi/3]]
srt3d2 = np.sqrt(3)/2
mats = [[[0, 0, -1], [0, 1, 0], [1, 0, 0]],
[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],
[[0, 0, 1], [0, 1, 0], [-1, 0, 0]],
[[0.5, 0, srt3d2], [0, 1, 0], [-srt3d2, 0, 0.5]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[0, 0, 1], [0, 1, 0], [-1, 0, 0]],
[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],
[[0, 0, -1], [0, 1, 0], [1, 0, 0]],
[[0.5, 0, -srt3d2], [0, 1, 0], [srt3d2, 0, 0.5]],
[[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[0, 0, 1], [0, 1, 0], [-1, 0, 0]],
[[0.5, 0, srt3d2], [0, 1, 0], [-srt3d2, 0, 0.5]]],
[[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[0, 0, -1], [0, 1, 0], [1, 0, 0]],
[[0.5, 0, -srt3d2], [0, 1, 0], [srt3d2, 0, 0.5]]]
]
for angle, solu in zip(angles, mats):
with self.subTest(angle=angle):
rmat = at.rot_y(angle)
np.testing.assert_almost_equal(rmat, solu)
class TestRotZ(TestCase):
def test_rot_z(self):
angles = [3*np.pi/2, np.pi, np.pi/2, np.pi/3, 0,
-3*np.pi/2, -np.pi, -np.pi/2, -np.pi/3,
[0, np.pi/2, np.pi/3],
[0, -np.pi/2, -np.pi/3]]
srt3d2 = np.sqrt(3)/2
mats = [[[0, 1, 0], [-1, 0, 0], [0, 0, 1]],
[[-1, 0, 0], [0, -1, 0], [0, 0, 1]],
[[0, -1, 0], [1, 0, 0], [0, 0, 1]],
[[0.5, -srt3d2, 0], [srt3d2, 0.5, 0], [0, 0, 1]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[0, -1, 0], [1, 0, 0], [0, 0, 1]],
[[-1, 0, 0], [0, -1, 0], [0, 0, 1]],
[[0, 1, 0], [-1, 0, 0], [0, 0, 1]],
[[0.5, srt3d2, 0], [-srt3d2, 0.5, 0], [0, 0, 1]],
[[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[0, -1, 0], [1, 0, 0], [0, 0, 1]],
[[0.5, -srt3d2, 0], [srt3d2, 0.5, 0], [0, 0, 1]]],
[[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[0, 1, 0], [-1, 0, 0], [0, 0, 1]],
[[0.5, srt3d2, 0], [-srt3d2, 0.5, 0], [0, 0, 1]]]
]
for angle, solu in zip(angles, mats):
with self.subTest(angle=angle):
rmat = at.rot_z(angle)
np.testing.assert_almost_equal(rmat, solu)
class TestSkew(TestCase):
def test_skew(self):
skew_mat = at.skew([1, 2, 3])
np.testing.assert_array_equal(skew_mat, [[0, -3, 2], [3, 0, -1], [-2, 1, 0]])
skew_mat = at.skew([[1, 2], [2, 3], [3, 4]])
np.testing.assert_array_equal(skew_mat, [[[0, -3, 2], [3, 0, -1], [-2, 1, 0]],
[[0, -4, 3], [4, 0, -2], [-3, 2, 0]]])
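# A property worth noting: skew(a) @ b equals the cross product a x b, e.g.
# at.skew([1, 2, 3]) @ [4, 5, 6] -> [-3, 6, -3] == np.cross([1, 2, 3], [4, 5, 6])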
class TestNLERP(TestCase):
def test_nlerp(self):
with self.subTest(input_type=list):
q0 = [0, 0, 0, 1]
q1 = [0.5, 0.5, 0.5, 0.5]
qt = at.nlerp(q0, q1, 0)
np.testing.assert_allclose(qt, q0)
qt = at.nlerp(q0, q1, 1)
np.testing.assert_allclose(qt, q1)
qt = at.nlerp(q0, q1, 0.5)
qtrue = (np.array(q0)+np.array(q1))/2
qtrue /= np.linalg.norm(qtrue)
np.testing.assert_allclose(qt, qtrue)
qt = at.nlerp(q0, q1, 0.25)
qtrue = np.array(q0)*(1-0.25)+np.array(q1)*0.25
qtrue /= np.linalg.norm(qtrue)
np.testing.assert_allclose(qt, qtrue)
qt = at.nlerp(q0, q1, 0.79)
qtrue = np.array(q0)*(1-0.79) + np.array(q1)*0.79
qtrue /= np.linalg.norm(qtrue)
np.testing.assert_allclose(qt, qtrue)
q0 = np.array([0.23, 0.45, 0.67, 0.2])
q0 /= np.linalg.norm(q0)
q1 = np.array([-0.3, 0.2, 0.6, 0.33])
q1 /= np.linalg.norm(q1)
qt = at.nlerp(q0, q1, 0)
np.testing.assert_allclose(qt, q0)
qt = at.nlerp(q0, q1, 1)
np.testing.assert_allclose(qt, q1)
qt = at.nlerp(q0, q1, 0.5)
qtrue = (q0+q1)/2
qtrue /= np.linalg.norm(qtrue)
np.testing.assert_allclose(qt, qtrue)
qt = at.nlerp(q0, q1, 0.25)
qtrue = q0*(1-0.25)+q1*0.25
qtrue /= np.linalg.norm(qtrue)
np.testing.assert_allclose(qt, qtrue)
qt = at.nlerp(q0, q1, 0.79)
# comes from ODTBX matlab function
qtrue = (1-0.79)*q0+0.79*q1
qtrue /= np.linalg.norm(qtrue)
            np.testing.assert_allclose(qt, qtrue)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pyburst.grids import grid_analyser
from pyburst.synth import synth
from pyburst.plotting import plot_tools
from pyburst.mcmc import mcmc_plot, mcmc_versions, mcmc_tools, burstfit
# TODO:
# - plot contours
def plot_posteriors(source, mc_version, discard, chain=None, n_walkers=None,
n_steps=None, save=False, display=True):
"""Plots mcmc posteriors for synthetic data
"""
plot_truth(plot_type='posteriors', source=source, mc_version=mc_version, discard=discard,
chain=chain, n_walkers=n_walkers, n_steps=n_steps, save=save, display=display)
def plot_contours(source, mc_version, discard, chain=None, n_walkers=None,
n_steps=None, save=False, display=True):
"""Plots mcmc corner plot for synthetic data
"""
plot_truth(plot_type='contours', source=source, mc_version=mc_version, discard=discard,
chain=chain, n_walkers=n_walkers, n_steps=n_steps, save=save, display=display)
def plot_truth(plot_type, source, mc_version, discard, chain=None, n_walkers=None,
n_steps=None, save=False, display=True):
"""Plots results of MCMC against true values of synthetic data
"""
mcv = mcmc_versions.McmcVersion(source, mc_version)
chain = check_chain(chain, n_walkers=n_walkers, n_steps=n_steps, source=source,
version=mc_version)
truth = synth.get_true_values(source, version=mcv.synth_version,
group=mcv.synth_group)
if plot_type == 'posteriors':
mcmc_plot.plot_posteriors(chain, source=source, version=mc_version,
discard=discard, truth_values=truth, save=save,
display=display)
elif plot_type == 'contours':
mcmc_plot.plot_contours(chain, discard=discard, source=source, truth=True,
version=mc_version, truth_values=truth, save=save,
display=display)
else:
        raise ValueError('plot_type must be one of: (posteriors, contours)')
def check_chain(chain, n_walkers, n_steps, source, version):
"""Checks if chain was provided or needs loading
"""
if chain is None:
if None in (n_walkers, n_steps):
raise ValueError('Must provide either chain, or both n_walkers and n_steps')
else:
chain = mcmc_tools.load_chain(source, version=version, n_walkers=n_walkers,
n_steps=n_steps)
return chain
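# Typical call (a sketch; the source name, version and chain dimensions below
# are illustrative only):
#     plot_posteriors('synth5', mc_version=1, discard=2000,
#                     n_walkers=960, n_steps=10000)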
def plot_interp_residuals(synth_source, batches, mc_source, mc_version,
fontsize=16):
"""Plot synthetic burst properties against interpolated predictions
to test accuracy of interpolator
"""
n_sigma = 1.96
bfit = burstfit.BurstFit(source=mc_source, version=mc_version)
bprops = bfit.mcmc_version.bprops
kgrid = grid_analyser.Kgrid(source=synth_source)
param_table = kgrid.get_combined_params(batches)
interp_table = extract_interp_table(param_table, bfit=bfit)
summ_table = kgrid.get_combined_summ(batches)
fig, ax = plt.subplots(len(bprops), figsize=(6, 8))
for i, bprop in enumerate(bprops):
u_bprop = f'u_{bprop}'
yscale = plot_tools.unit_scale(bprop)
yunits = plot_tools.unit_label(bprop)
model = np.array(summ_table[bprop]) / yscale
interp = np.array(interp_table[bprop]) / yscale
u_model = np.array(summ_table[u_bprop]) / yscale
u_interp = np.array(interp_table[u_bprop]) / yscale
residuals = interp - model
u_residuals = n_sigma * np.sqrt(u_model**2 + u_interp**2)
ax[i].errorbar(model, residuals, yerr=u_residuals, marker='o',
ls='none', capsize=3)
        x_max = np.max(model)
"""
DESCRIPTION
Preprocesses audio data before sending to Neural Network
See demo in in main()
MIT License
Copyright (c) 2018 The-Instrumental-Specialists
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import neuralnet_02 as NN
import numpy as np
import os
import glob
import json
import time
import scipy
import matplotlib.pylab as plt
import scipy.io.wavfile as wavfile
import scipy.fftpack
from scipy.fftpack import dct
def getMax(array_list):
"""Returns a tuple (index,value) of the maximum in an 1D array or list"""
m = array_list[0]
m_index = 0
for i,value in enumerate(array_list):
if value > m:
m = value
m_index = i
return (m_index,m)
def processFile(filename,length = 256,q=1,fs_in=8000,divide=4,plot=False):
"""returns one sided FFT amplitudes of filename
filename (string): ex) 'sax.wav'
length (int): Number of datapoints of one-sided fft (must be even,preferably a power of 2)
q (int): (optional argument) Downsampling Rate
fs_in (int): (optional argument) throw ValueError if fs of filename != fs_in
divide (int): (optional argument) 1/divide*Nsamples is taken from FFT (preferably even)
plot (bool): (optional argument) plots the one sided FFT if True, otherwise does not plot
Note: length < total_time*fs//(2*q*divide)
Ex) length = 256 < (0.25sec)*(44100Hz)//(2*4*2) = 689
"""
length = length*divide
#fs = sample rate, sound = multichannel sound signal
try:
fs1, sound = wavfile.read(filename)
except ValueError:
print(str(filename) + ' failed to process')
return 'failed'
if fs1 != fs_in:
raise ValueError('Sampling rate should be ' + str(fs_in) + ' for: ' + filename)
sig1 = sound[:,0] #left channel
pre_emphasis = 0.97
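    # pre-emphasis high-pass filter y[n] = x[n] - 0.97*x[n-1], boosting high frequencies before the FFT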
sig1 = np.append(sig1[0], sig1[1:] - pre_emphasis * sig1[:-1])
fs2, sig2 = downsample(sig1,fs1,q)
N2 = len(sig2)
sig3 = sig2[N2//2-length:N2//2+length]
#print(len(sig3))
    FFT = abs(scipy.fftpack.fft(sig3))
FFT_side = FFT[range(len(FFT)//2)]
#freqs = scipy.fftpack.fftfreq(sig3.size, 1/fs2)
#plt.plot(freqs,FFT)
if len(FFT_side) != length:
print('ERROR MESSAGE DETAILS')
print('filename: ' + filename)
print('length = ' + str(length))
print('fs_in = ' + str(fs_in))
print('q = ' + str(q))
print('divide = ' + str(divide))
total_time = len(sig1)/fs1
print('total_time = ' + str(total_time))
print('Please check: length < total_time*fs//(2*q)')
print('Check: ' + str(length) + ' < ' + str(total_time*fs1//(2*q)))
raise ValueError('Length FFT_side != length: ' + str(len(FFT_side)) + ' != ' + str(length))
FFT_log = []
# normalize FFT
for value in FFT_side:
        value = np.log(value)
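        # (hedged completion: the original file is truncated here; the docstring
        # says the one-sided FFT amplitudes are returned, so the log values are
        # collected and returned as an array)
        FFT_log.append(value)
    return np.array(FFT_log)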
"""
Holds some code for analyzing the faces_basic dataset.
Eventually much of this code should be broken out to functions that are common across datasets,
then this file should hold only study-specific information.
The working directory must be ../../.. relative to this file.
Notes:
https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1004660
0.15 - 200 Hz 1-pole filter
1000 Hz srate
Paper used CAR after rejecting artifacts or epileptiform activity.
58-62 Hz 3rd order Butterworth filter.
400 msec stimulus on (face or house), 400 msec ISI.
50 house and 50 face pictures per run.
Further methods from https://www.sciencedirect.com/science/article/pii/S105381191300935X
Spectral decoupling:
1-sec window centred in the middle of the stimulus.
PSD (Hann -> Fourier -> * complex conjugate)
Normalize w.r.t. mean spectrum across all segments ( psd / mean(psd) )
log(psd)
PCA to get projections from PSD to PSCs (only on freqs < 200 Hz that are not around 60Hz or its harmonics)
Online:
Spectrogram (wavelets), project each time point onto first PSC (broadband)
Smoothing (sigma = 0.05 sec)
z-scoring
exp()
Here we will take a slightly different approach:
PSD -> TensorDecomposition (trials, frequencies, channels)
Raw -> TensorDecomposition (trials, times, channels)
(? DemixingPCA ?)
@author: <NAME>
"""
from pathlib import Path
import numpy as np
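# A minimal sketch of the spectral-decoupling steps described in the module
# docstring above (Hann window -> Fourier -> PSD, normalize w.r.t. the mean
# spectrum, log, then PCA on the log-PSDs). The function name and signature
# are illustrative only, and the frequency masking (< 200 Hz, away from 60 Hz
# harmonics) mentioned in the docstring is omitted for brevity.
def spectral_decoupling_sketch(segments):
    """segments: (n_trials, n_samples) array of 1-sec windows for one channel."""
    win = np.hanning(segments.shape[1])
    spectra = np.abs(np.fft.rfft(segments * win, axis=1)) ** 2  # PSD per trial
    spectra /= spectra.mean(axis=0, keepdims=True)              # normalize w.r.t. mean spectrum
    log_psd = np.log(spectra)
    centered = log_psd - log_psd.mean(axis=0, keepdims=True)
    _, _, vt = np.linalg.svd(centered, full_matrices=False)     # rows of vt are the PSCs
    return log_psd @ vt.T                                       # projections onto the PSCs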
DATA_ROOT = Path.cwd() / 'data' / 'kjm_ecog' / 'download' / 'faces_basic'
AREA_LABELS = [
'Temporal pole',
'Parahippocampal gyrus', # parahippocampal part of the medial occipito-temporal gyrus
'Inferior temporal gyrus',
'Middle temporal gyrus',
'fusiform gyrus', # Lateral occipito-temporal gyrus,
'Lingual gyrus', # lingual part of the medial occipito-temporal gyrus
'Inferior occipital gyrus',
'Cuneus',
'Post-ventral cingulate gyrus', # Posterior-ventral part of the
'Middle Occipital gyrus',
'occipital pole',
'precuneus',
'Superior occipital gyrus',
'Post-dorsal cingulate gyrus', # Posterior-dorsal part of the cingulate gyrus
' ',
' ',
' ',
' ',
' ',
'Non-included area',
]
def import_to_npype(subject_id):
import scipy.io
from collections import OrderedDict
from neuropype.engine import InstanceAxis, SpaceAxis, TimeAxis, Chunk, Block, Packet, Flags
data_fn = DATA_ROOT / 'data' / subject_id / (subject_id + '_faceshouses.mat')
dat_contents = scipy.io.loadmat(data_fn)
stim = dat_contents['stim'].reshape(-1) # samples x 1; uint8
data = dat_contents['data'] # samples x channels; float
srate = dat_contents['srate'][0][0]
# Time vector
tvec = np.arange(len(stim)) / srate
# Process the stimulus to get an events chunk
b_stim_onset = np.diff(np.hstack((0, stim))) != 0
b_stim_onset = np.logical_and(b_stim_onset, stim != 0)
stim_inds = np.where(b_stim_onset)[0]
stim_vals = stim[stim_inds]
stim_content = np.repeat(['ISI'], len(stim_vals)).astype(object)
stim_content[stim_vals <= 50] = 'house'
    stim_content[np.logical_and(stim_vals > 50, stim_vals <= 100)] = 'face'
import popcorn.input_output as in_out
import popcorn.resampling as resampling
import material_decomposition
import popcorn.spectral_imaging.registration as registration
import popcorn.image_processing.segmentation as segmentation
from popcorn.spectral_imaging.pipelines import skull_alignment_pipeline
import sys
import numpy as np
import math
import time
import SimpleITK as Sitk
from skimage.measure import label, regionprops
from skimage import img_as_ubyte
import PyIPSDK
import PyIPSDK.IPSDKIPLBinarization as Bin
import PyIPSDK.IPSDKIPLMorphology as Morpho
import PyIPSDK.IPSDKIPLAdvancedMorphology as AdvMorpho
import PyIPSDK.IPSDKIPLShapeAnalysis as ShapeAnalysis
def conversion_pipeline(image, bin_factor, min, max):
if bin_factor > 1:
image = resampling.bin_resize(image, bin_factor)
return resampling.conversion_uint16_to_float32(image, min, max)
def retrieve_min_max_from_path(path):
"""looks for min and max float in path
Args:
path (str): folder path
Returns:
(float, float) retrieved min and max values
"""
path = path.replace("\\", "")
path = path.replace("/", "")
return float(path.split("_")[-2]), float(path.split("_")[-1])
def segmentation_coloration(output_folder, concentration_map, segmentation_result, material):
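    """Overlay the segmented cells on the concentration map as an RGB stack
    (yellow for Au, orange for other materials) and save it slice by slice."""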
cells = np.copy(concentration_map)
cells[segmentation_result == 0] = 0
cells_part = np.copy(cells)
concentration_map = resampling.normalize_image(concentration_map) / 1.5
cells_part = resampling.normalize_image(cells_part)
noise = np.copy(concentration_map)
noise[cells_part > 0] = 0
mask = np.copy(cells)
mask[mask > 0] = 1
cells_offset = np.copy(concentration_map)
cells_offset[cells_part <= 0] = 0
cells_offset[cells_part > 0] = 0.2
rgb = [0.8, 0.8, 0] if material == "Au" else [0.8, 0.35, 0]
for i in range(cells_part.shape[0]):
red_slice = img_as_ubyte(noise[i, :, :] + rgb[0] * cells_part[i, :, :] + cells_offset[i, :, :])
green_slice = img_as_ubyte(noise[i, :, :] + rgb[1] * cells_part[i, :, :] + cells_offset[i, :, :])
blue_slice = img_as_ubyte(noise[i, :, :] + rgb[2] * cells_part[i, :, :] + cells_offset[i, :, :])
rgb_cells = np.stack((red_slice, green_slice, blue_slice), axis=-1)
in_out.save_tif_image(rgb_cells, output_folder + '{:04d}'.format(i), rgb=True)
class SpectralImagingExperiment:
def __init__(self, name, path, sample_type, modality, materials, resolution, bin_factor=1):
"""constructor of class SpectralImagingExperiment
Args:
name (str): sample radix
path (str): data path
sample_type (str): either "phantom", "rat brain" or "rat knee"
modality (str): modality used for acquisition
materials (list[str]): list of kedge materials
resolution (float): image resolution
bin_factor (int): binning factor when analyzing
"""
self.name = name
self.path = path
self.type = sample_type
self.modality = modality
self.materials = materials
self.resolution = resolution * bin_factor
self.bin_factor = bin_factor
def conversion(self):
"""Converts images from uint16 to float32 using the sample's name min/max inb4 binning them using defined bin
factor
Returns:
None
"""
for material in self.materials:
print("Conversion of :", material)
# We retrieve float min and max from given path
above_image_filenames = in_out.create_list_of_files(self.path + "*Above*" + material + "*", "tif")
above_min, above_max = retrieve_min_max_from_path(in_out.remove_filename_in_path(above_image_filenames[0]))
for index in range(len(above_image_filenames)//self.bin_factor):
# [1/5] Opening
image_to_bin = in_out.open_sequence(above_image_filenames[:self.bin_factor])
# [2/5] Deleting opened files from previous list of files
del above_image_filenames[:self.bin_factor]
# [3/5] Binning
binned_image = resampling.bin_resize(image_to_bin, self.bin_factor)
# [4/5] Conversion
converted_image = resampling.conversion_uint16_to_float32(binned_image, above_min, above_max)
# [5/5] Saving
in_out.save_tif_image(converted_image[0],
self.path + material + "\\Above_Acquisition\\" + '{:04d}'.format(index))
# We retrieve float min and max from given path
below_image_filenames = in_out.create_list_of_files(self.path + "*Below*" + material + "*", "tif")
below_min, below_max = retrieve_min_max_from_path(in_out.remove_filename_in_path(below_image_filenames[0]))
for index in range(len(below_image_filenames)//self.bin_factor):
# [1/5] Opening
image_to_bin = in_out.open_sequence(below_image_filenames[:self.bin_factor])
# [2/5] Deleting opened files from previous list of files
del below_image_filenames[:self.bin_factor]
# [3/5] Binning
binned_image = resampling.bin_resize(image_to_bin, self.bin_factor)
# [4/5] Conversion
converted_image = resampling.conversion_uint16_to_float32(binned_image, below_min, below_max)
# [5/5] Saving
in_out.save_tif_image(converted_image[0],
self.path + material + "\\Below_Acquisition\\" + '{:04d}'.format(index))
def register_volumes(self):
if self.type == "rat brain":
for material in self.materials:
print("--------", "Registering", material, "--------")
above_filenames = in_out.create_list_of_files(self.path + material + "\\Above_Acquisition\\",
"tif")
below_filenames = in_out.create_list_of_files(self.path + material + "\\Below_Acquisition\\",
"tif")
above_image = in_out.open_sequence(above_filenames)
below_image = in_out.open_sequence(below_filenames)
# -- Threshold computation
above_threshold_value = segmentation.find_threshold_value(material, "esrf")
above_mask = above_image > above_threshold_value
below_mask = below_image > above_threshold_value
# -- Extracting skulls
above_skull, above_skull_bbox = segmentation.extract_skull(above_mask)
below_skull, below_skull_bbox = segmentation.extract_skull(below_mask)
rotation_transform = registration.registration_computation(above_image,
below_image,
transform_type="rotation",
metric="msq",
moving_mask=above_skull,
ref_mask=below_skull,
verbose=True)
# Registering the above image
above_image = registration.apply_itk_transformation(above_image, rotation_transform, "linear")
in_out.save_tif_sequence(above_image,
self.path + material + "\\Above_Acquisition_Registered\\")
skull_alignment_pipeline(self.path + material + "\\Above_Acquisition_Registered\\",
self.path + material + "\\Below_Acquisition\\", material)
elif self.type == "mouse knee":
for material in self.materials:
print("--------", "Registering", material, "--------")
above_filenames = in_out.create_list_of_files(self.path + material + "\\Above_Acquisition_Binned\\",
"tif")
below_filenames = in_out.create_list_of_files(self.path + material + "\\Below_Acquisition_Binned\\",
"tif")
above_image = in_out.open_sequence(above_filenames)
below_image = in_out.open_sequence(below_filenames)
# -- Threshold computation
above_threshold_value = segmentation.find_threshold_value(material, "above", "esrf")
above_mask = np.copy(above_image)
above_mask[above_mask > above_threshold_value] = 1
above_mask[above_mask <= above_threshold_value] = 0
below_threshold_value = segmentation.find_threshold_value(material, "below", "esrf")
below_mask = np.copy(below_image)
below_mask[below_mask > below_threshold_value] = 1
below_mask[below_mask <= below_threshold_value] = 0
translation_transform = registration.registration_computation(above_image,
below_image,
transform_type="translation",
metric="msq",
moving_mask=above_mask,
ref_mask=below_mask,
verbose=True)
rotation_transform = registration.registration_computation(above_image,
below_image,
transform_type="rotation",
metric="msq",
moving_mask=above_mask,
ref_mask=below_mask,
verbose=True)
image_to_register = in_out.open_sequence(self.path + material + "\\Above_Acquisition\\")
# Registering the above image
translation_parameters = translation_transform.GetParameters()
translation_transform.SetParameters((translation_parameters[0]*2,
translation_parameters[1]*2,
translation_parameters[2]*2))
image_to_register = registration.apply_itk_transformation(image_to_register,
translation_transform,
"linear")
image_to_register_itk = Sitk.GetImageFromArray(image_to_register)
actual_rotation = Sitk.CenteredTransformInitializer(image_to_register_itk,
image_to_register_itk,
Sitk.Euler3DTransform(),
Sitk.CenteredTransformInitializerFilter.GEOMETRY)
rotation_parameters = rotation_transform.GetParameters()
actual_rotation.SetParameters((rotation_parameters[0],
rotation_parameters[1],
rotation_parameters[2],
rotation_parameters[3] * 2,
rotation_parameters[4] * 2,
rotation_parameters[5] * 2))
image_to_register = registration.apply_itk_transformation(image_to_register, actual_rotation, "linear")
print("\r[1/2] Saving registered image :", end="", flush=True)
in_out.save_tif_sequence(image_to_register,
self.path + material + "\\Above_Acquisition_Registered\\")
image_to_register = None
below_image = in_out.open_sequence(self.path + material + "\\Below_Acquisition\\")
below_image[below_image > below_threshold_value] = 1
below_image[below_image <= below_threshold_value] = 0
print("\r[2/2] Saving below mask :", end="", flush=True)
in_out.save_tif_sequence(below_image,
self.path + material + "\\Below_Mask\\")
print("\n")
elif self.type == "phantom":
for material in self.materials:
print("--------", "Registering", material, "--------")
above_filenames = in_out.create_list_of_files(self.path + material + "\\Above_Acquisition\\",
"tif")
below_filenames = in_out.create_list_of_files(self.path + material + "\\Below_Acquisition\\",
"tif")
above_image = in_out.open_sequence(above_filenames)
below_image = in_out.open_sequence(below_filenames)
# -- Threshold computation
above_mask = np.ones(above_image.shape)
above_mask[above_image > 0.18] = 0
above_mask[above_image < 0.14] = 0
below_mask = np.ones(below_image.shape)
below_mask[below_image > 0.17] = 0
below_mask[below_image < 0.14] = 0
rotation_transform = registration.registration_computation(above_image,
below_image,
transform_type="rotation",
metric="msq",
moving_mask=above_mask,
ref_mask=below_mask,
verbose=True)
# Registering the above image
above_image = registration.apply_itk_transformation(above_image, rotation_transform, "linear")
in_out.save_tif_sequence(above_image,
self.path + material + "\\Above_Acquisition_Registered\\")
def manual_registration(self, slice_of_interest):
"""Function made for manual registration tests
Args:
            slice_of_interest (int): slice around which the registration is tested
"""
for material in self.materials:
if material == "Au":
densities = np.array([19.3, 1.0])
material_attenuations = np.array([[165.8642, 000.1827],
[040.7423, 000.1835]])
else:
densities = np.array([4.93, 1.0])
material_attenuations = np.array([[170.9231, 000.3188],
[033.6370, 000.3307]])
print("--------", "Registering", material, "--------")
above_filenames = in_out.create_list_of_files(self.path + material + "\\Above_Acquisition\\",
"tif")
            below_image = in_out.open_sequence(
in_out.create_list_of_files(self.path + material + "\\Below_Acquisition\\",
"tif")[slice_of_interest - 5:slice_of_interest + 6])
above_image = in_out.open_sequence(above_filenames)
for z in range(20):
above_image_itk = Sitk.GetImageFromArray(above_image)
translation_transformation = Sitk.TranslationTransform(above_image_itk.GetDimension())
translation_transformation.SetOffset((-0.14, z/10 - 1, 0))
above_image_itk = Sitk.Resample(above_image_itk, translation_transformation, Sitk.sitkLinear, 0.0,
above_image_itk.GetPixelIDValue())
registered_image = Sitk.GetArrayFromImage(above_image_itk)
                images = np.stack((registered_image[slice_of_interest-5:slice_of_interest+6], below_image), axis=0)
concentration_maps = \
material_decomposition.decomposition_equation_resolution(images, densities, material_attenuations,
volume_fraction_hypothesis=False,
verbose=False)
in_out.save_tif_sequence(concentration_maps[0],
self.path + material + "\\manual_registrationz_" + str(z/10 - 1) + "\\")
def material_decomposition(self, registration_done=False):
"""material decomposition method
Args:
registration_done (bool): did we use registration ?
Returns:
None
"""
if len(self.materials) == 1:
material = self.materials[0]
if self.type == "phantom":
if registration_done:
above_filenames = \
in_out.create_list_of_files(self.path + self.materials[0] + "\\Above_Acquisition_Registered\\",
"tif")
else:
above_filenames = \
in_out.create_list_of_files(self.path + self.materials[0] + "\\Above_Acquisition\\",
"tif")
below_filenames = in_out.create_list_of_files(self.path + self.materials[0] + "\\Below_Acquisition\\",
"tif")
elif self.type == "rat brain":
above_filenames = \
in_out.create_list_of_files(self.path + self.materials[0] + "\\Aligned_Above_Acquisition\\",
"tif")
below_filenames = \
in_out.create_list_of_files(self.path + self.materials[0] + "\\Aligned_Below_Acquisition\\",
"tif")
else:
above_filenames = \
in_out.create_list_of_files(self.path + self.materials[0] + "\\Above_Acquisition_Registered\\",
"tif")
below_filenames = in_out.create_list_of_files(self.path + self.materials[0] + "\\Below_Acquisition\\",
"tif")
time_list = []
for filename_index in range(min(len(above_filenames), len(below_filenames))):
above_image = in_out.open_image(above_filenames[filename_index])
below_image = in_out.open_image(below_filenames[filename_index])
images = np.stack((above_image, below_image), axis=0)
if material == "Au":
densities = np.array([19.3, 1.0])
material_attenuations = np.array([[165.8642, 000.1827],
[040.7423, 000.1835]])
else:
densities = np.array([4.93, 1.0])
material_attenuations = np.array([[170.9231, 000.3188],
[033.6370, 000.3307]])
                # time the decomposition itself rather than the file I/O that follows
                start_time = time.time()
                concentration_maps = \
                    material_decomposition.decomposition_equation_resolution(images, densities, material_attenuations,
                                                                             volume_fraction_hypothesis=False,
                                                                             verbose=False)
                material_decomposition.loading_bar(filename_index, min(len(above_filenames), len(below_filenames)))
in_out.save_tif_image(concentration_maps[0], self.path + material + "\\"
+ material + "2materials_decomposition\\" + '{:04d}'.format(filename_index))
# in_out.save_tif_image(concentration_maps[1], self.path + material + "\\"
# + "Bone_decomposition\\" + '{:04d}'.format(filename_index))
# in_out.save_tif_image(concentration_maps[2], self.path + material + "\\"
# + "Water_decomposition\\" + '{:04d}'.format(filename_index))
time_list.append(time.time() - start_time)
print("")
print("Average time for decomposition :", sum(time_list)/len(time_list), "s")
print("Min time for decomposition :", min(time_list), "s")
print("Evolution of decomposition time :", (time_list[-1] - time_list[0])/len(time_list), "s")
elif len(self.materials) == 2:
for material in self.materials:
if material == "I":
if self.type == "phantom":
if registration_done:
above_filenames = in_out.create_list_of_files(self.path + material +
"\\Above_Acquisition_Registered\\",
"tif")
else:
above_filenames = in_out.create_list_of_files(self.path + material +
"\\Above_Acquisition\\",
"tif")
below_filenames = in_out.create_list_of_files(
self.path + material + "\\Below_Acquisition\\",
"tif")
elif self.type == "rat brain":
above_filenames = in_out.create_list_of_files(
self.path + material + "\\Aligned_Above_Acquisition\\",
"tif")
below_filenames = in_out.create_list_of_files(
self.path + material + "\\Aligned_Below_Acquisition\\",
"tif")
else:
above_filenames = in_out.create_list_of_files(
self.path + material + "\\Above_Acquisition_Registered\\",
"tif")
below_filenames = in_out.create_list_of_files(
self.path + material + "\\Below_Acquisition\\",
"tif")
for filename_index in range(min(len(above_filenames), len(below_filenames))):
above_image = in_out.open_image(above_filenames[filename_index])
below_image = in_out.open_image(below_filenames[filename_index])
images = np.stack((above_image, below_image), axis=0)
if material == "Au":
                        densities = np.array([19.3, 4.93, 1.0])
#import pandas as pd
import numpy as np
import random
from tqdm import tqdm
#from sklearn.linear_model import LinearRegression
#from pandas.core.common import SettingWithCopyWarning
#import warnings
#from .dbtonumpy import eurusd_prices
#warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
from datetime import datetime, timedelta
import datetime as dt
start_date = dt.date.today()
y = dt.timedelta(days=1*365)
end_date = start_date + y
nb_paths = 10
initial_price = 1.10
def r2_score_and_slope(y):
"""takes numpy array of prices and returns r2 score, slope and constant"""
    y = np.array(y)
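    # (hedged completion: the original file is truncated here; a minimal
    # least-squares line fit of price vs. index, with R^2 from the residuals)
    x = np.arange(len(y))
    slope, constant = np.polyfit(x, y, 1)
    fitted = slope * x + constant
    ss_res = np.sum((y - fitted) ** 2)
    ss_tot = np.sum((y - np.mean(y)) ** 2)
    r2 = 1.0 - ss_res / ss_tot if ss_tot > 0 else 0.0
    return r2, slope, constant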
from HW_PI_PiezoStage.PiezoStage_Scan import PiezoStage_Scan
from ScopeFoundry import Measurement
from ScopeFoundry.helper_funcs import sibling_path, load_qt_ui_file
import pyqtgraph as pg
import numpy as np
import time
import pickle
import os.path
from pyqtgraph.Qt import QtGui, QtCore, QtWidgets
from pyqtgraph.Point import Point
import customplotting.mscope as cpm
class PicoHarp_Scan(PiezoStage_Scan):
name = "PicoHarp_Scan"
def setup(self):
PiezoStage_Scan.setup(self)
self.picoharp_hw = self.app.hardware['picoharp']
self.pi_device_hw = self.app.hardware['piezostage']
self.settings.New("Tacq", unit="s", dtype=float, vmin=1e-3, vmax=100*60*60, initial=1) #removed si=True to keep units from auto-changing
self.settings.New("Resolution", dtype=int, choices=[("4 ps", 4), ("8 ps", 8), ("16 ps", 16), ("32 ps", 32), ("64 ps", 64), ("128 ps", 128), ("256 ps", 256), ("512 ps", 512)], initial=4)
self.settings.New("count_rate0", dtype=int, ro=True, vmin=0, vmax=100e6)
self.settings.New("count_rate1", dtype=int, ro=True, vmin=0, vmax=100e6)
self.settings.New("flush_data", dtype=bool, initial=False)
def setup_figure(self):
PiezoStage_Scan.setup_figure(self)
#setup ui for picoharp specific settings
details_groupBox = self.set_details_widget(widget = self.settings.New_UI(include=["Tacq", "Resolution", "count_rate0", "count_rate1", "flush_data"]))
widgets = details_groupBox.findChildren(QtGui.QWidget)
tacq_spinBox = widgets[1]
resolution_comboBox = widgets[4]
count_rate0_spinBox = widgets[6]
count_rate1_spinBox = widgets[9]
flush_data_checkBox = widgets[12]
#connect settings to ui
self.picoharp_hw.settings.Tacq.connect_to_widget(tacq_spinBox)
self.picoharp_hw.settings.Resolution.connect_to_widget(resolution_comboBox)
self.picoharp_hw.settings.count_rate0.connect_to_widget(count_rate0_spinBox)
self.picoharp_hw.settings.count_rate1.connect_to_widget(count_rate1_spinBox)
self.settings.flush_data.connect_to_widget(flush_data_checkBox)
tacq_spinBox.valueChanged.connect(self.update_estimated_scan_time)
self.update_estimated_scan_time()
#save data buttons
self.ui.save_image_pushButton.clicked.connect(self.save_intensities_image)
self.ui.save_array_pushButton.clicked.connect(self.save_intensities_data)
#setup imageview
self.imv = pg.ImageView()
self.imv.getView().setAspectLocked(lock=False, ratio=1)
self.imv.getView().setMouseEnabled(x=True, y=True)
self.imv.getView().invertY(False)
roi_plot = self.imv.getRoiPlot().getPlotItem()
roi_plot.getAxis("bottom").setLabel(text="Time (ns)")
def update_estimated_scan_time(self):
try:
self.overhead = self.x_range * self.y_range * .055 #determined by running scans and timing
self.scan_time = self.x_range * self.y_range * self.settings["Tacq"] + self.overhead
self.ui.estimated_scan_time_label.setText("Estimated scan time: " + "%.2f" % self.scan_time + "s")
except:
pass
def update_display(self):
PiezoStage_Scan.update_display(self)
if hasattr(self, 'sum_intensities_image_map') and hasattr(self, 'hist_data'):
self.picoharp_hw.read_from_hardware()
if not self.interrupt_measurement_called:
per_pixel = self.scan_time/(self.x_range * self.y_range)
seconds_left = per_pixel * (self.x_range * self.y_range - self.pixels_scanned)
self.ui.estimated_time_label.setText("Estimated time remaining: " + "%.2f" % seconds_left + "s")
self.img_item.setImage(self.sum_intensities_image_map) #update stage image
#update imageview
self.times = self.time_data[:, 0, 0]*1e-3
self.imv.setImage(img=self.hist_data, autoRange=False, autoLevels=True, xvals=self.times)
self.imv.show()
self.imv.window().setWindowFlag(QtCore.Qt.WindowCloseButtonHint, False) #disable closing image view window
#update progress bar
progress = 100 * ((self.pixels_scanned+1)/np.abs(self.x_range*self.y_range))
self.ui.progressBar.setValue(progress)
self.set_progress(progress)
pg.QtGui.QApplication.processEvents()
def pre_run(self):
try:
PiezoStage_Scan.pre_run(self) #setup scan paramters
self.picoharp = self.picoharp_hw.picoharp
self.check_filename("_raw_PL_hist_data.pkl")
self.num_hist_chans = self.app.hardware['picoharp'].calc_num_hist_chans()
dirname = self.app.settings['save_dir']
self.check_filename('_histdata.dat')
sample_filename = self.app.settings['sample']
self.hist_filename = os.path.join(dirname, sample_filename + '_histdata.dat')
self.check_filename('_timedata.dat')
self.time_filename = os.path.join(dirname, sample_filename + '_timedata.dat')
hist_len = self.num_hist_chans
            #Use memmaps to use less memory and store data into disk
            self.hist_data = np.memmap(self.hist_filename, dtype='float32', mode='w+',
                                       shape=(hist_len, self.x_range, self.y_range))
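            # (hedged completion: the original file is truncated here; time_data
            # is read back in update_display as time_data[:, 0, 0], so it is
            # assumed to share hist_data's shape)
            self.time_data = np.memmap(self.time_filename, dtype='float32', mode='w+',
                                       shape=(hist_len, self.x_range, self.y_range))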
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 10 15:15:09 2019
@author: egomez
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
from exponential_fit import decission_data_exponential
from lowess_fit import significance_analysis
def func_exp_pure(x, a, c):
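    """Pure exponential decay a*exp(-c*x), used to fit the p-value curves p(n)."""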
return a*np.exp(-c*(x))
def plot_decission_with_LOWESS(df, combination,test, measure, fs = None,
width = None, height = None, path = None,
file_name = None, colors = None, #ctheme_lowess = None,
sign_level = None, gamma = None):
"""
Function to plot the estimated p-values and the fitted exponential function
by comparisons:
- df: pandas dataframe containing the estimated p-values
- combination: dictionary with the list of comparisons, i.e.
combination={
'0': 'A02_A03',
'1': 'A02_A09',
'2': 'A02_A16',
'3': 'A02_A29',
'4': 'A02_A35',
'5': 'A02_A55',
'6': 'A02_A65',
'7': 'A02_A85',
'8': 'A02_A96'
}
- test: dictionary, i.e. {'0': 'MannWhitneyU'}
- measure: measures for which the plot is done, i.e.
variables={
'0': 'area (px^2)',
'1': 'short axis length (px)',
'2': 'orientation'
}
Optional parameters:
- ctheme: list of colors. Each measure is plot with a different color.
- fs: optional. font size
- height: height of the figure
- width: width of the figure
- path: directory in which the figure will be saved. If it is None, then
the image is not saved.
- file_name: name to use for the figure if it is saved.
- sign_level: alpha for statistical significance of 100(1-alpha).
Set by default as 0.05.
- gamma: threshold to calculate the convergence of p(n) and Theta function.
"""
if colors is None:
colors = ['#B80652', '#FF2017', '#F36635', #'#DD9952',
'#CABB04',
#'#A7C850',
'#56C452', '#2EBAB3', '#1C69A8', '#25369A',
'#4E3180']
if fs is None:
fs = 10
if height is None:
height = 5
if width is None:
width = 10
if path is None:
save_fig = 0
else:
save_fig = 1
if file_name is None:
# Change the format to png, jpg, or any other one as
# "file_name = 'p_values.pdf"
file_name = 'p_values.png'
if sign_level is None:
sign_level = 0.05
if gamma is None:
gamma = 5e-06
param = decission_data_exponential(df, combination, measure,
sign_level = sign_level, gamma = gamma)
N = 500
for c in range(len(combination)):
param1 = param[param.comparison==combination[np.str(c)]]
print(combination[np.str(c)])
f = plt.gcf()
ax = plt.gca()
f.set_figwidth(width)
f.set_figheight(height)
mpl.style.use('seaborn')
sns.set_style("white")
sns.set_context("talk")
splot = ax
labels = ['A']
df_comparison = df[df.comparison == combination[np.str(c)]]
for i in range(len(measure)):
df_measure = df_comparison[df_comparison.measure == measure[np.str(i)]]
for t in range(len(test)):
# Plot LOWESS fit
df_test = df_measure[df_measure.test == test[np.str(t)]]
L, dcoeff, positive_N = significance_analysis(df_test,
sign_level = sign_level)
positive_N = (param1[measure[np.str(i)]+
'_nalpha_estimated'][0])
splot.plot(L[:,0], L[:,1], color=colors[i])# color = ctheme[i],
# splot.fill_between(L[:,0], L[:,1], 0*L[:,1], color = ctheme_lowess[i], alpha=0.7)
labels = np.concatenate((labels, [measure[np.str(i)] +
r' $\hat{n}_\alpha$ = ' +
np.str(positive_N)]))
# EXPONENTIAL FIT
par_a,par_c= (param1[measure[np.str(i)]+'_exp_params'][0])
positive_N = (param1[measure[np.str(i)]+
'_nalpha_theory'][0])
splot.plot(np.arange(0,N),
func_exp_pure(np.arange(0,N),par_a,par_c),
linestyle='--', color=colors[i])# color = ctheme[i],
labels = np.concatenate((labels, [r'Exponential fit $n_{\alpha}$ = ' +
np.str(positive_N)]))
splot.tick_params(labelsize = fs)
y = sign_level*np.ones((len(np.arange(0,N))))
splot.plot(np.arange(0,N), y, color = 'black')
labels = np.concatenate((labels, [r'$\alpha = 0.05$']))
splot.legend(labels[1:], bbox_to_anchor=(1, 1),ncol = 1,fancybox=True,
shadow=True, fontsize = fs) # loc='best',
splot.set_title(combination[np.str(c)], fontsize = fs)
splot.set_xlabel('Sample size (n)', fontsize = fs)
splot.set_ylabel('p-value ' + combination[np.str(c)], fontsize = fs)
splot.set_ylim([0,0.45])
f.tight_layout()
if save_fig == 1:
plt.savefig(path + combination[np.str(c)] + file_name, dpi=75)
plt.show()
def plot_pcurve_by_measure(df, combination, measure, test = None, fs = None,
width = None, height = None, path = None,
file_name = None, colors = None, #ctheme_lowess = None,
sign_level = None, gamma = None):
"""
Function to plot the estimated p-values and the fitted exponential function
by measures:
- df: pandas dataframe containing the estimated p-values
- combination: dictionary with the list of comparisons, i.e.
combination={
'0': 'A02_A03',
'1': 'A02_A09',
'2': 'A02_A16',
'3': 'A02_A29',
'4': 'A02_A35',
'5': 'A02_A55',
'6': 'A02_A65',
'7': 'A02_A85',
'8': 'A02_A96'
}
- measure: measures for which the plot is done, i.e.
variables={
'0': 'area (px^2)',
'1': 'short axis length (px)',
'2': 'orientation'
}
Optional parameters:
- test: dictionary, i.e. {'0': 'MannWhitneyU'}
- ctheme: list of colors. Each measure is plot with a different color.
- fs: optional. font size
- height: height of the figure
- width: width of the figure
- path: directory in which the figure will be saved. If it is None, then
the image is not saved.
- file_name: name to use for the figure if it is saved.
- sign_level: alpha for statistical significance of 100(1-alpha).
Set by default as 0.05.
- gamma: threshold to calculate the convergence of p(n) and Theta function.
"""
if colors is None:
colors = ['#B80652', '#FF2017', '#F36635', #'#DD9952',
'#CABB04',
#'#A7C850',
'#56C452', '#2EBAB3', '#1C69A8', '#25369A',
'#4E3180']
if fs is None:
fs = 10
if height is None:
height = 8
if width is None:
width = 10
if path is None:
save_fig = 0
else:
save_fig = 1
if file_name is None:
# Change the format to png, jpg, or any other one as
# "file_name = 'p_values.pdf"
file_name = 'p_values.png'
if sign_level is None:
sign_level = 0.05
if test is None:
test ={'0': 'MannWhitneyU'}
if gamma is None:
gamma = 5e-06
param = decission_data_exponential(df, combination, measure,
sign_level = sign_level, gamma = gamma)
N = max(df.N)
# N = 1200 # 400
for i in range(len(measure)):
        df_measure = df[df.measure == measure[np.str(i)]]
import unittest
import numpy as np
class Test_UFUNCTests(unittest.TestCase):
#region UFUNC ADD tests
    def test_UFUNC_AddAccumulate_1(self):
x = np.arange(8);
a = np.add.accumulate(x)
print(a)
x = np.arange(8).reshape((2,2,2))
b = np.add.accumulate(x)
print(b)
c = np.add.accumulate(x, 0)
print(c)
d = np.add.accumulate(x, 1)
print(d)
e = np.add.accumulate(x, 2)
print(e)
def test_UFUNC_AddReduce_1(self):
x = np.arange(8);
a = np.add.reduce(x)
print(a)
x = np.arange(8).reshape((2,2,2))
b = np.add.reduce(x)
print(b)
c = np.add.reduce(x, 0)
print(c)
d = np.add.reduce(x, 1)
print(d)
e = np.add.reduce(x, 2)
print(e)
def test_UFUNC_AddReduce_2(self):
x = np.arange(8).reshape((2,2,2))
b = np.add.reduce(x)
print(b)
c = np.add.reduce(x, (0,1))
print(c)
d = np.add.reduce(x, (1,2))
print(d)
e = np.add.reduce(x, (2,1))
print(e)
def test_UFUNC_AddReduceAt_1(self):
a =np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
print(a)
print("**********")
x = np.linspace(0, 15, 16).reshape(4,4)
b = np.add.reduceat(x, [0, 3, 1, 2, 0])
print(b)
print("**********")
c = np.multiply.reduceat(x, [0, 3], axis = 1)
print(c)
def test_UFUNC_AddOuter_1(self):
x = np.arange(4);
a = np.add.outer(x, x)
print(a.shape)
print(a)
x = np.arange(6).reshape((3,2))
y = np.arange(6).reshape((2,3))
b = np.add.outer(x, y)
print(b.shape)
print(b)
#endregion
#region UFUNC SUBTRACT tests
def test_UFUNC_SubtractAccumulate_1(self):
x = np.arange(8);
a = np.subtract.accumulate(x)
print(a)
x = np.arange(8).reshape((2,2,2))
b = np.subtract.accumulate(x)
print(b)
c = np.subtract.accumulate(x, 0)
print(c)
d = np.subtract.accumulate(x, 1)
print(d)
e = np.subtract.accumulate(x, 2)
print(e)
def test_UFUNC_SubtractReduce_1(self):
x = np.arange(8);
a = np.subtract.reduce(x)
print(a)
x = np.arange(8).reshape((2,2,2))
b = np.subtract.reduce(x)
print(b)
c = np.subtract.reduce(x, 0)
print(c)
d = np.subtract.reduce(x, 1)
print(d)
e = np.subtract.reduce(x, 2)
print(e)
def test_UFUNC_SubtractReduceAt_1(self):
a =np.subtract.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
print(a)
print("**********")
        x = np.linspace(0, 15, 16).reshape(4,4)
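        # (hedged completion mirroring the Add variant above; the original file
        # is truncated here)
        b = np.subtract.reduceat(x, [0, 3, 1, 2, 0])
        print(b)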
# This file is heavily based on waterfall.py from satnogs-client.
import matplotlib
import numpy as np
import sys
matplotlib.use('Agg')
import matplotlib.pyplot as plt
OFFSET_IN_STDS = -2.0
SCALE_IN_STDS = 8.0
class Waterfall():
"""
Parse waterfall data file
:param datafile_path: Path to data file
    :type datafile_path: str
"""
def __init__(self, datafile_path, logger=None):
"""
Class constructor
"""
self.logger = logger or sys.stdout
self.data = self._get_waterfall(datafile_path)
def plot(self, figure_path, vmin=None, vmax=None):
"""
Plot waterfall into a figure
:param figure_path: Path of figure file to save
:type figure_path: str
        :param vmin: Minimum of the colour scale; autoscaled from the data if None
        :type vmin: float
        :param vmax: Maximum of the colour scale; autoscaled from the data if None
        :type vmax: float
"""
tmin = np.min(self.data['data']['tabs'] / 1000000.0)
tmax = np.max(self.data['data']['tabs'] / 1000000.0)
fmin = np.min(self.data['freq'] / 1000.0)
fmax = np.max(self.data['freq'] / 1000.0)
if vmin is None or vmax is None:
vmin = -100
vmax = -50
c_idx = self.data['data']['spec'] > -200.0
if np.sum(c_idx) > 100:
data_mean = np.mean(self.data['data']['spec'][c_idx])
data_std = np.std(self.data['data']['spec'][c_idx])
vmin = data_mean - 2.0 * data_std
vmax = data_mean + 4.0 * data_std
plt.figure(figsize=(10, 20))
plt.imshow(self.data['data']['spec'],
origin='lower',
aspect='auto',
interpolation='None',
extent=[fmin, fmax, tmin, tmax],
vmin=vmin,
vmax=vmax,
cmap='Greens') # 'viridis', 'plasma', 'inferno', 'magma', 'cividis'
# also, see https://matplotlib.org/stable/tutorials/colors/colormaps.html
plt.xlabel('Frequency (kHz)')
plt.ylabel('Time (seconds)')
fig = plt.colorbar(aspect=50)
fig.set_label('Power (dB)')
plt.savefig(figure_path, bbox_inches='tight')
plt.close()
def _read_waterfall(self, datafile_path):
"""
Read waterfall data file
:param datafile_path: Path to data file
:type datafile_path: str
:return: Waterfall data
:rtype: dict
"""
waterfall = {}
with open(datafile_path, mode='rb') as datafile:
waterfall = {
'timestamp': np.fromfile(datafile, dtype='|S32', count=1)[0],
'nchan': np.fromfile(datafile, dtype='>i4', count=1)[0],
'samp_rate': np.fromfile(datafile, dtype='>i4', count=1)[0],
'nfft_per_row': np.fromfile(datafile, dtype='>i4', count=1)[0],
'center_freq': np.fromfile(datafile, dtype='>f4', count=1)[0],
'endianess': np.fromfile(datafile, dtype='>i4', count=1)[0]
}
# Let's disable the logging for now.
# self.logger.write("Waterfall details: " + repr(waterfall) + "\n")
data_dtypes = np.dtype([('tabs', 'int64'), ('spec', 'float32', (waterfall['nchan'], ))])
waterfall['data'] = np.fromfile(datafile, dtype=data_dtypes)
if waterfall['data'].size == 0:
raise EOFError
datafile.close()
return waterfall
def _compress_waterfall(self, waterfall):
"""
Compress spectra of waterfall
:param waterfall: Watefall data
:type waterfall: dict
:return: Compressed spectra
:rtype: dict
"""
spec = waterfall['data']['spec']
std = np.std(spec, axis=0)
        offset = np.mean(spec, axis=0)
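        # (hedged completion: the original file is truncated here; following the
        # OFFSET_IN_STDS / SCALE_IN_STDS constants defined at the top of this
        # file, each channel is shifted and scaled into uint8 range)
        offset = offset + OFFSET_IN_STDS * std
        scale = SCALE_IN_STDS * std / 255.0
        values = np.clip((spec - offset) / scale, 0.0, 255.0).astype('uint8')
        return {'offset': offset, 'scale': scale, 'values': values}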
# -*- coding: utf-8 -*-
"""
Created on Mon 2018.02.28:00:00:00
@author: jac-nosm
Abstract:
Dilution experiments are performed routinely in microbial laboratories.
However, their results are not properly analyzed, with only very basic heuristic
formulas. We present a proper statistical analysis of these experiments and
also include a further analysis of the bacterial evolution under temperature stress.
"""
# Packages
from numpy import array, zeros, exp
from DilExp import DilExp, MultiDilExp
from matplotlib import pyplot as plt
def TransformTNTC( count, c=300):
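    """Return count unchanged if it is an int; otherwise treat it as
    TNTC ('too numerous to count') and return the cap c."""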
if isinstance( count, int):
return count
else:
return c
def AnaBF( spreadsheet, lab, T=100000, control=False, data_all=True):
"""Analyse BF for the repetitions in all data
experiment in lab = 'Lab5', 'Lab6', 'Lab8' or
if control 'Lab5Cntrl', 'Lab6Cntrl', 'Lab8Cntrl'
not(data_all), that is, only include the first countable dilution.
"""
J=7 ## Dilutions < J
c=300 ##maximum count for the drop or plated volumen, TNTC threshold
d = DilExp( J=J, alpha0=4, alpha=10, alphap=100, c=c, Sc=4.13, Sc_unit=r"cm^2", q=0.05, pr_a=10)
""" alpha0, dilution factor for tube 1 from tube 0: 1ml from 4*10ml
alpha, dilution factor for each tube =10, 1ml from 10ml
alphap, dilution factor for the drop or plated volume =100, 0.1ml from 10ml
q, probability of miscounting (=0.05)
pr_a, prior mean of the count, given the dilution to be counted (=10)
Sc, coupon surface area (=4.13)
Sc_unit, coupon surface area units (=r"cm^2").
"""
if control:
sheet = lab + 'Cntrl'
#print("Analizing control experiment of %s" % (lab,))
else:
sheet = lab
#print("Analizing experiment of %s" % (lab,))
rt = []
for k in range(3): #Repetitions 1, 2, 3
s = zeros(2)
        y = zeros(2)
# -*- coding: utf-8 -*-
"""MCIR for PET with primal-dual algorithms.
Usage:
PET_MCIR_PD [--help | options]
Options:
-T <pattern>, --trans=<pattern> transformation pattern, * or % wildcard
(e.g., tm_ms*.txt). Enclose in quotations.
-t <str>, --trans_type=<str> transformation type (tm, disp, def)
[default: tm]
-S <pattern>, --sino=<pattern> sinogram pattern, * or % wildcard
(e.g., sino_ms*.hs). Enclose in quotations.
-a <pattern>, --attn=<pattern> attenuation pattern, * or % wildcard
(e.g., attn_ms*.hv). Enclose in quotations.
-R <pattern>, --rand=<pattern> randoms pattern, * or % wildcard
(e.g., rand_ms*.hs). Enclose in quotations.
-n <norm>, --norm=<norm> ECAT8 bin normalization file
-e <int>, --epoch=<int> num epochs [default: 10]
-r <string>, --reg=<string> regularisation ("None","FGP_TV","explicit_TV", ...)
[default: None]
-o <outp>, --outp=<outp> output file prefix [default: recon]
--outpath=<string> output folder path [default: './']
--param_path=<string> param folder path [default: './']
--nxny=<nxny> image x and y dimension [default: 127]
--dxdy=<dxdy> image x and y spacing
(default: determined by scanner)
-I <str>, --initial=<str> Initial estimate
--visualisations show visualisations
--nifti save output as nifti
--gpu use GPU projector
-v <int>, --verbosity=<int> STIR verbosity [default: 0]
-s <int>, --save_interval=<int> save every x iterations [default: 10]
--descriptive_fname option to have descriptive filenames
--update_obj_fn_interval=<int> frequency to update objective function
[default: 1]
--alpha=<val> regularisation strength (if used)
[default: 0.5]
--reg_iters=<val> Number of iterations for the regularisation
subproblem [default: 100]
--precond Use preconditioning
--numSegsToCombine=<val> Rebin all sinograms, with a given number of
segments to combine. Increases speed.
--numViewsToCombine=<val> Rebin all sinograms, with a given number of
views to combine. Increases speed.
--normaliseDataAndBlock Normalise raw data and block operator by
multiplying by 1./normK.
--algorithm=<string> Which algorithm to run [default: spdhg]
--numThreads=<int> Number of threads to use
--numSubsets=<int> Number of physical subsets to use [default: 1]
--gamma=<val> parameter controlling primal-dual trade-off (>1 promotes dual)
[default: 1.]
--PowerMethod_iters=<val> number of iterations for the computation of operator norms
with the power method [default: 10]
--templateAcqData Use template acd data
--StorageSchemeMemory Use memory storage scheme
"""
# SyneRBI Synergistic Image Reconstruction Framework (SIRF)
# Copyright 2020 University College London.
#
# This is software developed for the Collaborative Computational
# Project in Synergistic Reconstruction for Biomedical Imaging
# (formerly CCP PETMR)
# (http://www.ccpsynerbi.ac.uk/).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from os import path
import os
from glob import glob
from docopt import docopt
import matplotlib.pyplot as plt
import numpy as np
from sirf.Utilities import error, show_2D_array, examples_data_path
import sirf.Reg as reg
import sirf.STIR as pet
from cil.framework import BlockDataContainer, ImageGeometry, BlockGeometry
from cil.optimisation.algorithms import PDHG, SPDHG
from cil.optimisation.functions import \
KullbackLeibler, BlockFunction, IndicatorBox, MixedL21Norm, ScaledFunction
from cil.optimisation.operators import \
CompositionOperator, BlockOperator, LinearOperator, GradientOperator, ScaledOperator
from cil.plugins.ccpi_regularisation.functions import FGP_TV
from ccpi.filters import regularisers
from cil.utilities.multiprocessing import NUM_THREADS
__version__ = '0.1.0'
args = docopt(__doc__, version=__version__)
###########################################################################
# Global set-up
###########################################################################
# storage scheme
if args['--StorageSchemeMemory']:
pet.AcquisitionData.set_storage_scheme('memory')
else:
pet.AcquisitionData.set_storage_scheme('default')
# Verbosity
pet.set_verbosity(int(args['--verbosity']))
if int(args['--verbosity']) == 0:
msg_red = pet.MessageRedirector(None, None, None)
# Number of threads
numThreads = args['--numThreads'] if args['--numThreads'] else NUM_THREADS
pet.set_max_omp_threads(numThreads)
if args['--templateAcqData']:
template_acq_data = pet.AcquisitionData('Siemens_mMR', span=11, max_ring_diff=15, view_mash_factor=1)
def main():
"""Run main function."""
use_gpu = True if args['--gpu'] else False
###########################################################################
# Parse input files
###########################################################################
[num_ms, trans_files, sino_files, attn_files, rand_files] = \
get_filenames(args['--trans'],args['--sino'],args['--attn'],args['--rand'])
###########################################################################
# Read input
###########################################################################
[trans, sinos_raw, attns, rands_raw] = \
read_files(trans_files, sino_files, attn_files, rand_files, args['--trans_type'])
sinos = pre_process_sinos(sinos_raw, num_ms)
rands = pre_process_sinos(rands_raw, num_ms)
###########################################################################
# Initialise recon image
###########################################################################
image = get_initial_estimate(sinos,use_gpu)
###########################################################################
# Set up resamplers
###########################################################################
if trans is None:
resamplers = None
else:
resamplers = [get_resampler(image, trans=tran) for tran in trans]
###########################################################################
# Resample attenuation images (if necessary)
###########################################################################
resampled_attns = resample_attn_images(num_ms, attns, trans, use_gpu, image)
print ("resampled_attns", len (resampled_attns))
###########################################################################
# Set up acquisition models (one per motion state)
###########################################################################
acq_models, masks = set_up_acq_models(
num_ms, sinos, rands, resampled_attns, image, use_gpu)
###########################################################################
# Set up reconstructor
###########################################################################
if args['--reg']=='explicit_TV':
[F, G, K, normK, tau, sigma, use_axpby, prob, gamma] = set_up_explicit_reconstructor(
use_gpu, num_ms, image, acq_models, resamplers, masks, sinos, rands)
else:
[F, G, K, normK, tau, sigma, use_axpby, prob, gamma] = set_up_reconstructor(
use_gpu, num_ms, acq_models, resamplers, masks, sinos, rands)
###########################################################################
# Get output filename
###########################################################################
outp_file = get_output_filename(
attn_files, normK, sigma, tau, sino_files, resamplers, use_gpu)
###########################################################################
# Get algorithm
###########################################################################
algo, num_iter = get_algo(F, G, K, normK, tau, sigma, gamma, use_axpby, prob, outp_file,image)
###########################################################################
# Create save call back function
###########################################################################
save_callback = get_save_callback_function(outp_file, num_iter)
###########################################################################
# Run the reconstruction
###########################################################################
# algo.run(num_iter, verbose=2, print_interval=1, callback=save_callback)
algo.run(num_iter, verbose=2, callback=save_callback)
def get_filenames(trans, sino, attn, rand):
"""Get filenames."""
trans_pattern = str(trans).replace('%', '*')
sino_pattern = str(sino).replace('%', '*')
attn_pattern = str(attn).replace('%', '*')
rand_pattern = str(rand).replace('%', '*')
    if sino is None:
        raise AssertionError("--sino missing")
trans_files = sorted(glob(trans_pattern))
sino_files = sorted(glob(sino_pattern))
attn_files = sorted(glob(attn_pattern))
rand_files = sorted(glob(rand_pattern))
num_ms = len(sino_files)
# Check some sinograms found
if num_ms == 0:
raise AssertionError("No sinograms found at {}!".format(sino_pattern))
# Should have as many trans as sinos
if len(trans_files) > 0 and num_ms != len(trans_files):
raise AssertionError("#trans should match #sinos. "
"#sinos = " + str(num_ms) +
", #trans = " + str(len(trans_files)))
# If any rand, check num == num_ms
if len(rand_files) > 0 and len(rand_files) != num_ms:
raise AssertionError("#rand should match #sinos. "
"#sinos = " + str(num_ms) +
", #rand = " + str(len(rand_files)))
# For attn, there should be 0, 1 or num_ms images
if len(attn_files) > 1 and len(attn_files) != num_ms:
raise AssertionError("#attn should be 0, 1 or #sinos")
return [num_ms, trans_files, sino_files, attn_files, rand_files]
def read_files(trans_files, sino_files, attn_files, rand_files, trans_type):
"""Read files."""
if trans_files == []:
trans = None
else:
if trans_type == "tm":
trans = [reg.AffineTransformation(file) for file in trans_files]
elif trans_type == "disp":
trans = [reg.NiftiImageData3DDisplacement(file)
for file in trans_files]
elif trans_type == "def":
trans = [reg.NiftiImageData3DDeformation(file)
for file in trans_files]
else:
raise error("Unknown transformation type")
sinos_raw = [pet.AcquisitionData(file) for file in sino_files]
attns = [pet.ImageData(file) for file in attn_files]
# fix a problem with the header which doesn't allow
# to do algebra with randoms and sinogram
rands_arr = [pet.AcquisitionData(file).as_array() for file in rand_files]
rands_raw = [ s * 0 for s in sinos_raw ]
for r,a in zip(rands_raw, rands_arr):
r.fill(a)
return [trans, sinos_raw, attns, rands_raw]
def pre_process_sinos(sinos_raw, num_ms):
"""Preprocess raw sinograms.
Make positive if necessary and do any required rebinning."""
# If empty (e.g., no randoms), return
if not sinos_raw:
return sinos_raw
# Loop over all sinograms
sinos = [0]*num_ms
for ind in range(num_ms):
# If any sinograms contain negative values
# (shouldn't be the case), set them to 0
sino_arr = sinos_raw[ind].as_array()
if (sino_arr < 0).any():
print("Input sinogram " + str(ind) +
" contains -ve elements. Setting to 0...")
sinos[ind] = sinos_raw[ind].clone()
sino_arr[sino_arr < 0] = 0
sinos[ind].fill(sino_arr)
else:
sinos[ind] = sinos_raw[ind]
# If rebinning is desired
segs_to_combine = 1
if args['--numSegsToCombine']:
segs_to_combine = int(args['--numSegsToCombine'])
views_to_combine = 1
if args['--numViewsToCombine']:
views_to_combine = int(args['--numViewsToCombine'])
if segs_to_combine * views_to_combine > 1:
sinos[ind] = sinos[ind].rebin(segs_to_combine, views_to_combine)
# only print first time
if ind == 0:
print("Rebinned sino dimensions: {sinos[ind].dimensions()}")
return sinos
def get_initial_estimate(sinos, use_gpu):
"""Get initial estimate."""
# from the arguments
initial_estimate = args['--initial']
nxny = int(args['--nxny'])
if initial_estimate:
image = pet.ImageData(initial_estimate)
elif args['--templateAcqData']:
image = sinos[0].create_uniform_image(0., (127, 220, 220))
image.initialise(dim=(127, 220, 220), vsize=(2.03125, 1.7080754, 1.7080754))
else:
# Create image based on ProjData
image = sinos[0].create_uniform_image(0.0, (nxny, nxny))
# If using GPU, need to make sure that image is right size.
if use_gpu:
dim = (127, 320, 320)
spacing = (2.03125, 2.08626, 2.08626)
# elif non-default spacing desired
elif args['--dxdy']:
dim = image.dimensions()
dxdy = float(args['--dxdy'])
spacing = (image.voxel_sizes()[0], dxdy, dxdy)
if use_gpu or args['--dxdy']:
image.initialise(dim=dim,
vsize=spacing)
image.fill(0.0)
return image
def get_resampler(image, ref=None, trans=None):
"""Return a NiftyResample object for the specified transform and image."""
if ref is None:
ref = image
resampler = reg.NiftyResample()
resampler.set_reference_image(ref)
resampler.set_floating_image(image)
resampler.set_padding_value(0)
resampler.set_interpolation_type_to_linear()
if trans is not None:
resampler.add_transformation(trans)
return resampler
def resample_attn_images(num_ms, attns, trans, use_gpu, image):
"""Resample attenuation images if necessary."""
resampled_attns = None
if trans is None:
resampled_attns = attns
else:
if len(attns) > 0:
resampled_attns = [0]*num_ms
# if using GPU, dimensions of attn and recon images have to match
ref = image if use_gpu else None
for i in range(num_ms):
# if we only have 1 attn image, then we need to resample into
# space of each gate. However, if we have num_ms attn images,
# then assume they are already in the correct position, so use
# None as transformation.
tran = trans[i] if len(attns) == 1 else None
# If only 1 attn image, then resample that. If we have num_ms
# attn images, then use each attn image of each frame.
attn = attns[0] if len(attns) == 1 else attns[i]
resam = get_resampler(attn, ref=ref, trans=tran)
resampled_attns[i] = resam.forward(attn)
return resampled_attns
def set_up_acq_models(num_ms, sinos, rands, resampled_attns, image, use_gpu):
"""Set up acquisition models."""
print("Setting up acquisition models...")
# From the arguments
algo = str(args['--algorithm'])
nsub = int(args['--numSubsets']) if args['--numSubsets'] and algo=='spdhg' else 1
norm_file = args['--norm']
verbosity = int(args['--verbosity'])
if not use_gpu:
acq_models = [pet.AcquisitionModelUsingRayTracingMatrix() for k in range(nsub * num_ms)]
else:
acq_models = [pet.AcquisitionModelUsingNiftyPET() for k in range(nsub * num_ms)]
for acq_model in acq_models:
acq_model.set_use_truncation(True)
acq_model.set_cuda_verbosity(verbosity)
acq_model.set_num_tangential_LORs(10)
# create masks
im_one = image.clone().allocate(1.)
masks = []
# If present, create ASM from ECAT8 normalisation data
asm_norm = None
if norm_file:
if not path.isfile(norm_file):
raise error("Norm file not found: " + norm_file)
asm_norm = pet.AcquisitionSensitivityModel(norm_file)
# Loop over each motion state
for ind in range(num_ms):
# Create attn ASM if necessary
asm_attn = None
if resampled_attns:
s = sinos[ind]
ra = resampled_attns[ind]
am = pet.AcquisitionModelUsingRayTracingMatrix()
asm_attn = get_asm_attn(s,ra,am)
# Get ASM dependent on attn and/or norm
asm = None
if asm_norm and asm_attn:
if ind == 0:
print("ASM contains norm and attenuation...")
asm = pet.AcquisitionSensitivityModel(asm_norm, asm_attn)
elif asm_norm:
if ind == 0:
print("ASM contains norm...")
asm = asm_norm
elif asm_attn:
if ind == 0:
print("ASM contains attenuation...")
asm = asm_attn
# Loop over physical subsets
for k in range(nsub):
current = k * num_ms + ind
if asm:
acq_models[current].set_acquisition_sensitivity(asm)
#KT we'll set the background in the KL function below
#KTif len(rands) > 0:
#KT acq_models[ind].set_background_term(rands[ind])
# Set up
acq_models[current].set_up(sinos[ind], image)
acq_models[current].num_subsets = nsub
acq_models[current].subset_num = k
# compute masks
if ind==0:
mask = acq_models[current].direct(im_one)
masks.append(mask)
# rescale by number of gates
if num_ms > 1:
acq_models[current] = ScaledOperator(acq_models[current], 1./num_ms)
return acq_models, masks
def get_asm_attn(sino, attn, acq_model):
"""Get attn ASM from sino, attn image and acq model."""
asm_attn = pet.AcquisitionSensitivityModel(attn, acq_model)
# temporary fix pending attenuation offset fix in STIR:
# converting attenuation into 'bin efficiency'
asm_attn.set_up(sino)
bin_eff = pet.AcquisitionData(sino)
bin_eff.fill(1.0)
asm_attn.unnormalise(bin_eff)
asm_attn = pet.AcquisitionSensitivityModel(bin_eff)
return asm_attn
def set_up_reconstructor(use_gpu, num_ms, acq_models, resamplers, masks, sinos, rands=None):
"""Set up reconstructor."""
# From the arguments
algo = str(args['--algorithm'])
regularizer = str(args['--reg'])
r_iters = int(args['--reg_iters'])
r_alpha = float(args['--alpha'])
nsub = int(args['--numSubsets']) if args['--numSubsets'] and algo=='spdhg' else 1
precond = True if args['--precond'] else False
param_path = str(args['--param_path'])
normalise = True if args['--normaliseDataAndBlock'] else False
gamma = float(args['--gamma'])
output_name = str(args['--outp'])
if not os.path.exists(param_path):
os.makedirs(param_path)
if normalise:
raise error('options {} and regularization={} are not yet implemented together'.format(normalise, regularizer))
# We'll need an additive term (eta). If randoms are present, use them
# Else, use a scaled down version of the sinogram
etas = rands if rands else [sino * 0 + 1e-5 for sino in sinos]
# Create composition operators containing linear
# acquisition models and resamplers,
# and create data fit functions
if nsub == 1:
if resamplers is None:
#KT C = [am.get_linear_acquisition_model() for am in acq_models]
C = [am for am in acq_models]
else:
C = [CompositionOperator(
#KTam.get_linear_acquisition_model(),
am,
res, preallocate=True)
for am, res in zip(*(acq_models, resamplers))]
fi = [KullbackLeibler(b=sino, eta=eta, mask=masks[0].as_array(),use_numba=True)
for sino, eta in zip(sinos, etas)]
else:
C = [am for am in acq_models]
fi = [None] * (num_ms * nsub)
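        # iterate over every (physical subset, motion state) pair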
for (k,i) in np.ndindex((nsub,num_ms)):
# resample if needed
if resamplers is not None:
C[k * num_ms + i] = CompositionOperator(
#KTam.get_linear_acquisition_model(),
C[k * num_ms + i],
resamplers[i], preallocate=True)
fi[k * num_ms + i] = KullbackLeibler(b=sinos[i], eta=etas[i], mask=masks[k].as_array(),use_numba=True)
if regularizer == "FGP_TV":
r_tolerance = 1e-7
r_iso = 0
r_nonneg = 1
r_printing = 0
device = 'gpu' if use_gpu else 'cpu'
G = FGP_TV(r_alpha, r_iters, r_tolerance,
r_iso, r_nonneg, r_printing, device)
if precond:
FGP_TV.proximal = precond_proximal
elif regularizer == "None":
G = IndicatorBox(lower=0)
else:
raise error("Unknown regularisation")
F = BlockFunction(*fi)
K = BlockOperator(*C)
if algo == 'spdhg':
prob = [1./ len(K)] * len(K)
else:
prob = None
if not precond:
if algo == 'pdhg':
# we want the norm of the whole physical BlockOp
normK = get_proj_norm(BlockOperator(*C),param_path)
sigma = gamma/normK
tau = 1/(normK*gamma)
elif algo == 'spdhg':
# we want the norm of each component
normK = get_proj_normi(BlockOperator(*C),nsub,param_path)
# we'll let spdhg do its default implementation
sigma = None
tau = None
use_axpby = False
else:
normK=None
if algo == 'pdhg':
tau = K.adjoint(K.range_geometry().allocate(1.))
# CD take care of edge of the FOV
filter = pet.TruncateToCylinderProcessor()
filter.apply(tau)
backproj_np = tau.as_array()
vmax = np.max(backproj_np[backproj_np>0])
backproj_np[backproj_np==0] = 10 * vmax
tau_np = 1/backproj_np
tau.fill(tau_np)
# apply filter second time just to be sure
filter.apply(tau)
            tau_np = tau.as_array()
            tau_np[tau_np==0] = 1 / (10 * vmax)
            # write the corrected edge values back into tau
            tau.fill(tau_np)
elif algo == 'spdhg':
taus_np = []
for (Ki,pi) in zip(K,prob):
tau = Ki.adjoint(Ki.range_geometry().allocate(1.))
# CD take care of edge of the FOV
filter = pet.TruncateToCylinderProcessor()
filter.apply(tau)
backproj_np = tau.as_array()
vmax = np.max(backproj_np[backproj_np>0])
backproj_np[backproj_np==0] = 10 * vmax
tau_np = 1/backproj_np
tau.fill(tau_np)
# apply filter second time just to be sure
filter.apply(tau)
tau_np = tau.as_array()
tau_np[tau_np==0] = 1 / (10 * vmax)
taus_np.append(pi * tau_np)
taus = np.array(taus_np)
tau_np = np.min(taus, axis = 0)
tau.fill(tau_np)
# save
np.save('{}/tau_{}.npy'.format(param_path, output_name), tau_np, allow_pickle=True)
i = 0
sigma = []
xx = K.domain_geometry().allocate(1.)
for Ki in K:
tmp_np = Ki.direct(xx).as_array()
tmp_np[tmp_np==0] = 10 * np.max(tmp_np)
sigmai = Ki.range_geometry().allocate(0.)
sigmai.fill(1/tmp_np)
sigma.append(sigmai)
# save
# np.save('{}/sigma_{}.npy'.format(param_path,i), 1/tmp_np, allow_pickle=True)
i += 1
sigma = BlockDataContainer(*sigma)
# trade-off parameter
sigma *= gamma
tau *= (1/gamma)
use_axpby = False
return [F, G, K, normK, tau, sigma, use_axpby, prob, gamma]
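# A hedged usage sketch (variables are hypothetical and assumed to come from the
# calling script): unpack the ingredients returned above and feed them to PDHG/SPDHG.
# F, G, K, normK, tau, sigma, use_axpby, prob, gamma = set_up_reconstructor(
#     use_gpu, num_ms, acq_models, resamplers, masks, sinos, rands)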
def set_up_explicit_reconstructor(use_gpu, num_ms, image, acq_models, resamplers, masks, sinos, rands=None):
"""Set up reconstructor."""
# From the arguments
algo = str(args['--algorithm'])
r_alpha = float(args['--alpha'])
nsub = int(args['--numSubsets']) if args['--numSubsets'] and algo=='spdhg' else 1
precond = True if args['--precond'] else False
param_path = str(args['--param_path'])
normalise = True if args['--normaliseDataAndBlock'] else False
gamma = float(args['--gamma'])
if precond:
raise error('Options precond and explicit TV are not yet implemented together')
# We'll need an additive term (eta). If randoms are present, use them
# Else, use a scaled down version of the sinogram
etas = rands if rands else [sino * 0 + 1e-5 for sino in sinos]
# Create composition operators containing linear
# acquisition models and resamplers,
# and create data fit functions
if nsub == 1:
if resamplers is None:
#KT C = [am.get_linear_acquisition_model() for am in acq_models]
C = [am for am in acq_models]
else:
C = [CompositionOperator(
#KTam.get_linear_acquisition_model(),
am,
res, preallocate=True)
for am, res in zip(*(acq_models, resamplers))]
fi = [KullbackLeibler(b=sino, eta=eta, mask=masks[0].as_array(),use_numba=True)
for sino, eta in zip(sinos, etas)]
else:
C = [am for am in acq_models]
fi = [None] * (num_ms * nsub)
for (k,i) in np.ndindex((nsub,num_ms)):
# resample if needed
if resamplers is not None:
C[k * num_ms + i] = CompositionOperator(
#KTam.get_linear_acquisition_model(),
C[k * num_ms + i],
resamplers[i], preallocate=True)
fi[k * num_ms + i] = KullbackLeibler(b=sinos[i], eta=etas[i], mask=masks[k].as_array(),use_numba=True)
# define gradient
Grad = GradientOperator(image, backend='c', correlation='SpaceChannel')
normGrad = get_grad_norm(Grad,param_path)
# define data fit
data_fit = MixedL21Norm()
MixedL21Norm.proximal = MixedL21Norm_proximal
if algo == 'pdhg':
# we want the norm of the whole physical BlockOp
normProj = get_proj_norm(BlockOperator(*C),param_path)
if normalise:
C_rs = [ScaledOperator(Ci,1/normProj) for Ci in C]
Grad_rs = ScaledOperator(Grad,1/normGrad)
C_rs.append(Grad_rs)
f_rs = [ScaledFunction(f,normProj)
for f in fi]
f_rs.append(ScaledFunction(data_fit,r_alpha * normGrad))
normK = np.sqrt(2)
else:
C.append(Grad)
fi.append(ScaledFunction(data_fit,r_alpha))
normK = np.sqrt(normProj**2 + normGrad**2)
sigma = gamma/normK
tau = 1/(normK*gamma)
prob = None
elif algo == 'spdhg':
# we want the norm of each component
normProj = get_proj_normi(BlockOperator(*C),nsub,param_path)
if normalise:
C_rs = [ScaledOperator(Ci,1/normProji) for Ci, normProji in zip(C,normProj)]
Grad_rs = ScaledOperator(Grad,1/normGrad)
C_rs.append(Grad_rs)
f_rs = [ScaledFunction(f,normProji)
for f, normProji in zip(fi, normProj)]
f_rs.append(ScaledFunction(data_fit,r_alpha * normGrad))
normK = [1.] * len(C_rs)
prob = [1./(2 * (len(C_rs)-1))] * (len(C_rs)-1) + [1./2]
else:
C.append(Grad)
fi.append(ScaledFunction(data_fit,r_alpha))
normK = normProj + [normGrad]
prob = [1./(2 * (len(C)-1))] * (len(C)-1) + [1./2]
# we'll let spdhg do its default stepsize implementation
sigma = None
tau = None
else:
raise error("algorithm '{}' is not implemented".format(algo))
G = IndicatorBox(lower=0)
if normalise:
F = BlockFunction(*f_rs)
K = BlockOperator(*C_rs)
else:
F = BlockFunction(*fi)
K = BlockOperator(*C)
use_axpby = False
return [F, G, K, normK, tau, sigma, use_axpby, prob, gamma]
def PowerMethod(operator, x_init=None):
    '''Power method to iteratively estimate the norm of a linear operator
    :param operator: input operator
    :type operator: :code:`LinearOperator`
    :param x_init: starting point for the iteration in the operator domain
    :returns: the square root of the last squared-norm estimate (the number of
        iterations is read from args['--PowerMethod_iters'])
    '''
# From the arguments
iterations = int(args['--PowerMethod_iters'])
# Initialise random
if x_init is None:
x0 = operator.domain_geometry().allocate('random')
else:
x0 = x_init.copy()
x1 = operator.domain_geometry().allocate()
y_tmp = operator.range_geometry().allocate()
s = []
# Loop
i = 0
while i < iterations:
operator.direct(x0,out=y_tmp)
operator.adjoint(y_tmp,out=x1)
x1norm = x1.norm()
if hasattr(x0, 'squared_norm'):
s.append( x1.dot(x0) / x0.squared_norm() )
else:
x0norm = x0.norm()
s.append( x1.dot(x0) / (x0norm * x0norm) )
x1.multiply((1.0/x1norm), out=x0)
print ("current squared norm: {}".format(s[-1]))
i += 1
    return np.sqrt(s[-1])  # api: numpy.sqrt
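# A hedged usage sketch: PowerMethod above estimates an operator norm; it assumes
# a docopt-style 'args' dict with '--PowerMethod_iters' was parsed upstream.
# normK = PowerMethod(BlockOperator(*C))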
"""Test tilted backpropagation algorithm"""
import numpy as np
import odtbrain
from common_methods import create_test_sino_3d, create_test_sino_3d_tilted, \
cutout, get_test_parameter_set
def test_3d_backprop_phase_real():
sino, angles = create_test_sino_3d()
parameters = get_test_parameter_set(2)
# reference
rref = list()
for p in parameters:
fref = odtbrain.backpropagate_3d(sino, angles, padval=0,
dtype=np.float64, onlyreal=True, **p)
rref.append(cutout(fref))
dataref = np.array(rref).flatten().view(float)
r = list()
for p in parameters:
f = odtbrain.backpropagate_3d_tilted(sino, angles, padval=0,
dtype=np.float64, onlyreal=True,
**p)
r.append(cutout(f))
data = np.array(r).flatten().view(float)
assert np.allclose(data, dataref)
def test_3d_backprop_pad():
sino, angles = create_test_sino_3d()
parameters = get_test_parameter_set(2)
# reference
rref = list()
for p in parameters:
fref = odtbrain.backpropagate_3d(sino, angles, padval="edge",
dtype=np.float64, onlyreal=False, **p)
rref.append(cutout(fref))
dataref = np.array(rref).flatten().view(float)
r = list()
for p in parameters:
f = odtbrain.backpropagate_3d_tilted(sino, angles, padval="edge",
dtype=np.float64, onlyreal=False,
**p)
r.append(cutout(f))
data = np.array(r).flatten().view(float)
assert np.allclose(data, dataref)
def test_3d_backprop_plane_rotation():
"""
A very soft test to check if planar rotation works fine
in the reconstruction with tilted angles.
"""
parameters = get_test_parameter_set(1)
results = []
    # These are specially selected angles that don't give high errors.
# Probably due to phase-wrapping, errors >2 may appear. Hence, we
# call it a soft test.
tilts = [1.1, 0.0, 0.234, 2.80922, -.29, 9.87]
for angz in tilts:
sino, angles = create_test_sino_3d_tilted(tilt_plane=angz, A=21)
rotmat = np.array([
[np.cos(angz), -np.sin(angz), 0],
[np.sin(angz), np.cos(angz), 0],
[0, 0, 1],
])
# rotate `tilted_axis` onto the y-z plane.
tilted_axis = np.dot(rotmat, [0, 1, 0])
rref = list()
for p in parameters:
fref = odtbrain.backpropagate_3d_tilted(sino, angles,
padval="edge",
tilted_axis=tilted_axis,
padding=(False, False),
dtype=np.float64,
onlyreal=False,
**p)
rref.append(cutout(fref))
data = np.array(rref).flatten().view(float)
results.append(data)
for ii in np.arange(len(results)):
assert np.allclose(results[ii], results[ii-1], atol=.2, rtol=.2)
def test_3d_backprop_plane_alignment_along_axes():
"""
Tests whether the reconstruction is always aligned with
the rotational axis (and not antiparallel).
"""
parameters = get_test_parameter_set(1)
p = parameters[0]
results = []
    # These are specially selected angles that don't give high errors.
# Probably due to phase-wrapping, errors >2 may appear. Hence, we
# call it a soft test.
tilts = [0, np.pi/2, np.pi, 3*np.pi/2, 2*np.pi]
for angz in tilts:
sino, angles = create_test_sino_3d_tilted(tilt_plane=angz, A=21)
rotmat = np.array([
[np.cos(angz), -np.sin(angz), 0],
            [np.sin(angz), np.cos(angz), 0],  # api: numpy.cos
from copy import deepcopy
from functools import reduce
import operator
import numpy as np
import pandas as pd
from pandas.api.types import is_string_dtype
import pandapower as pp
import pandapower.topology as top
from pandapower.grid_equivalents.auxiliary import drop_internal_branch_elements, ensure_origin_id
from pandapower.grid_equivalents.get_equivalent import get_equivalent, \
merge_internal_net_and_equivalent_external_net
try:
import pandaplan.core.pplog as logging
except ImportError:
import logging
try:
from misc.groups import Group
group_imported = True
except ImportError:
group_imported = False
try:
from simbench import voltlvl_idx
simbench_imported = True
except ImportError:
simbench_imported = False
logger = logging.getLogger(__name__)
def getFromDict(dict_, keys):
""" Get value from nested dict """
return reduce(operator.getitem, keys, dict_)
def setInDict(dict_, keys, value):
""" Set value to nested dict """
getFromDict(dict_, keys[:-1])[keys[-1]] = value
def appendSetInDict(dict_, keys, set_):
""" Use case specific: append existing value of type set in nested dict """
getFromDict(dict_, keys[:-1])[keys[-1]] |= set_
def setSetInDict(dict_, keys, set_):
""" Use case specific: set new or append existing value of type set in nested dict """
if isinstance(getFromDict(dict_, keys[:-1]), dict):
if keys[-1] in getFromDict(dict_, keys[:-1]).keys():
if isinstance(getFromDict(dict_, keys), set):
appendSetInDict(dict_, keys, set_)
else:
raise ValueError("The set in the nested dict cannot be appended since it actually "
"is not a set but a " + str(type(getFromDict(dict_, keys))))
else:
setInDict(dict_, keys, set_)
else:
raise ValueError("This function expects a dict for 'getFromDict(dict_, " + str(keys[:-1]) +
")', not a" + str(type(getFromDict(dict_, keys[:-1]))))
def append_set_to_dict(dict_, set_, keys):
""" Appends a nested dict by the values of a set, independant if the keys already exist or not.
"""
keys = pp.ensure_iterability(keys)
# ensure that the dict way to the last key exist
for pos, _ in enumerate(keys[:-1]):
if isinstance(getFromDict(dict_, keys[:pos]), dict):
if keys[pos] not in getFromDict(dict_, keys[:pos]).keys():
setInDict(dict_, keys[:pos + 1], dict())
else:
raise ValueError("This function expects a dict for 'getFromDict(dict_, " +
str(keys[:pos]) + ")', not a" + str(type(getFromDict(
dict_, keys[:pos]))))
# set the value
setSetInDict(dict_, keys, set_)
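# A minimal sketch of the nested-dict helpers above (the dict layout is illustrative):
def _demo_nested_dict_helpers():
    d = {"a": {"b": 1}}
    setInDict(d, ["a", "b"], 2)              # d == {"a": {"b": 2}}
    append_set_to_dict(d, {5}, ["a", "c"])   # creates d["a"]["c"] == {5}
    setSetInDict(d, ["a", "c"], {6})         # appends -> d["a"]["c"] == {5, 6}
    return d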
def eq_name(eq_type, other_zone=None, zone=None, number=None):
number_str = "" if number is None else " %i" % number
st = "%s%s equivalent" % (eq_type, number_str)
if other_zone is not None:
st += " of zone "
if isinstance(other_zone, str):
st += "'%s'" % other_zone
else:
st += str(other_zone)
if zone is not None:
st += " at zone "
if isinstance(zone, str):
st += "'%s'" % zone
else:
st += str(zone)
return st
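# Illustrative output (values are made up):
# eq_name("load", other_zone=2, zone=1, number=0)
# -> "load 0 equivalent of zone 2 at zone 1"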
def set_bus_zone_by_boundary_branches(net, all_boundary_branches):
"""
Set integer values (0, 1, 2, ...) to net.bus.zone with regard to the given boundary branches in
'all_boundary_branches'.
INPUT:
**net** - pandapowerNet
**all_boundary_branches** (dict) - defines which element indices are boundary branches.
The dict keys must be pandapower elements, e.g. "line" or "trafo"
"""
include = dict.fromkeys(["line", "dcline", "trafo", "trafo3w", "impedance"])
for elm in include.keys():
if elm in all_boundary_branches.keys():
include[elm] = net[elm].index.difference(all_boundary_branches[elm])
else:
include[elm] = True
mg = top.create_nxgraph(net, include_lines=include["line"], include_impedances=include["impedance"],
include_dclines=include["dcline"], include_trafos=include["trafo"],
include_trafo3ws=include["trafo3w"])
cc = top.connected_components(mg)
ccl = [list(c) for c in cc]
areas = []
while len(ccl):
# check intersections of the first area with all other unchecked areas (remains in ccl) and
# then add first area unionized with all intersectioned other areas to "areas"
areas += [ccl.pop(0)]
n_last_area = -1
while n_last_area != len(areas[-1]):
# check as long as len(areas[-1]) not changes anymore - needed because there can be
# intersections of remaining areas with the buses added to areas[-1]
# within the last while loop iteration via union
n_last_area = len(areas[-1])
            for i, c in enumerate(ccl):
                if len(np.intersect1d(c, areas[-1])) > 0:
                    areas[-1] = np.union1d(areas[-1], ccl.pop(i))
                    break  # ccl was modified; the surrounding while loop rescans
for i, area in enumerate(areas):
net.bus.zone.loc[area] = i
def get_branch_power(net, bus, power_type, branches_dict=None):
"""
    Sums power of branches connected to 'bus'. The power is summed with a negative sign (= how much power flows
into the bus).
INPUT:
**net** - pandapower net
**bus** (int) - index of the bus whose connected branches power flows are summed
**power_type** (str) - should be "p_mw" or "q_mvar"
OPTIONAL:
**branches_dict** (dict, None) - if given, only branches within 'branches_dict' are
considered for summing the power. An exemplary input is {"line": {0, 1, 2}, "trafo": {1}}.
"""
connected_branches = pp.get_connected_elements_dict(
net, [bus], connected_buses=False, connected_bus_elements=False,
connected_branch_elements=True, connected_other_elements=False)
power = 0
bus_types = ["from_bus", "to_bus", "hv_bus", "lv_bus", "mv_bus"]
for elm, idxs in connected_branches.items():
if branches_dict is not None:
if elm in branches_dict.keys():
idxs = set(branches_dict[elm]).intersection(set(idxs))
else:
continue
for idx in idxs:
for bus_type in bus_types:
if bus_type in net[elm].columns and net[elm][bus_type].at[idx] == bus:
col = power_type[0] + "_" + bus_type.split("_")[0] + power_type[1:]
power -= net["res_" + elm][col].at[idx]
break
return power
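# A hedged usage sketch ('net' and the element indices are hypothetical): sum the
# active power flowing into bus 3 over selected branches only.
# p_in = get_branch_power(net, 3, "p_mw", branches_dict={"line": {0, 1}, "trafo": {0}})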
def _create_eq_elms(net, buses, elm, branches=None, idx_start=None, sign=1,
name=None, zone=None, other_zone=None, **kwargs):
"""
Internal function of create_eq_loads() or create_eq_gens()
"""
name = name if name is not None else f"equivalent {elm}"
# --- check existing results and return if not available
cols = {"load": ["p_mw", "q_mvar"], "gen": ["p_mw", "vm_pu"]}[elm]
if len(buses - set(net.res_bus.index)) or net.res_bus.loc[
buses, cols].isnull().any().any():
logger.warning(f"No {elm}s could be added to 'net_ib_eq_load' since bus results " +
"are missing.")
return pd.Index()
# --- run functionality
if branches is not None:
branches_has_buses_keys = not len(set(branches.keys()).symmetric_difference(set(buses)))
names = pp.ensure_iterability(name, len(buses))
new_idx = []
for no, (bus, name) in enumerate(zip(buses, names)):
bra = branches if branches is None or not branches_has_buses_keys else branches[bus]
idx = idx_start + no if idx_start is not None else None
p = sign * get_branch_power(net, bus, "p_mw", bra)
if elm == "load":
q = sign * get_branch_power(net, bus, "q_mvar", bra)
new = pp.create_load(net, bus, p, q, name=name, index=idx, **kwargs)
elif elm == "gen":
vm = net.res_bus.vm_pu.at[bus]
new = pp.create_gen(net, bus, p, vm, name=name, index=idx, **kwargs)
else:
raise NotImplementedError(f"elm={elm} is not implemented.")
if "origin_id" in net[elm].columns:
net[elm].origin_id.loc[new] = eq_name(elm, other_zone, zone, number=no)
new_idx.append(new)
return pd.Index(new_idx)
def create_eq_loads(net, buses, branches=None, idx_start=None, sign=1,
name="equivalent load", zone=None, other_zone=None, **kwargs):
"""
Create loads at 'buses' with p and q values equal to sums of p, q power flows over the given
branches.
INPUT:
**net** - pandapower net to be manipulated
**buses** (iterable) - buses at which additional loads should be created
**branches** (dict of (element: set of element indices) or dict of those (with buses as
keys)) - selection of branches to be considered to sum p and q power flows to be set to the
loads. If None, within all branches, all connecting branches must be found and are then
considered for summation.
Example 1: {'trafo': {0, 1, 2}}
Example 2: {1: {'trafo': {0, 1}}, 2: {'trafo': {2}} (if buses is [1, 2])
OPTIONAL:
**idx_start** (int, None) - Starting index for creating the loads. I.e. if 'idx_start' == 3
and len(buses) == 2, then the indices of the created loads will be 3 and 4.
**sign** (1 or -1, 1) - If 1, load get the power which flows out of the branches.
**name** (str or iterable of strings (with length of buses), 'equivalent load') - Value
to be set to the new net.load.name
**zone** (value, None) - This value will be included in net.load.origin_id, if
this column exist
**other_zone** (value, None) - This value will be included in net.load.origin_id, if
this column exist
****kwargs** - key word arguments for pp.create_load(), e.g. in_service=False.
OUTPUT:
new_idx - list of indices of the new loads
"""
return _create_eq_elms(net, buses, "load", branches=branches, idx_start=idx_start, sign=sign,
name=name, zone=zone, other_zone=other_zone, **kwargs)
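# A hedged usage sketch (hypothetical net, bus and trafo indices): add loads at
# buses 1 and 2 carrying the power that flows out of trafo 0.
# new_loads = create_eq_loads(net, {1, 2}, branches={"trafo": {0}}, zone="zone_a")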
def create_eq_gens(net, buses, branches=None, idx_start=None, sign=-1,
name="equivalent gen", zone=None, other_zone=None, **kwargs):
""" Same as create_eq_loads """
return _create_eq_elms(net, buses, "gen", branches=branches, idx_start=idx_start, sign=sign,
name=name, zone=zone, other_zone=other_zone, **kwargs)
def split_grid_by_bus_zone(net, boundary_bus_zones=None, eq_type=None, separate_eqs=True,
**kwargs):
"""
INPUT:
**net** - pandapower net - In net.bus.zone different zones must be given.
OPTIONAL:
**boundary_bus_zones** - strings in net.bus.zone which are to be considered as boundary
buses
**eq_type** (str, None) - If given, equivalent elements are added to the boundaries
**separate_eqs** (bool, True) - Flag whether the equivalents (if eq_type is given)
        should be calculated for each external zone individually
****kwargs** key word arguments such as "only_connected_groups"
OUTPUT:
**nets_ib** - dict of subnets
**boundary_buses** - dict of boundary buses (details at the docstrings of the subfunctions)
    A difference between with boundary_bus_zones and without is that the latter
    additionally contains the key "internal".
"""
if net.bus.zone.isnull().any():
raise ValueError("There are NaNs in net.bus.zone")
if boundary_bus_zones is None:
return nets_ib_by_bus_zone_with_boundary_branches(
net, eq_type=eq_type, separate_eqs=separate_eqs, **kwargs)
else:
return split_grid_by_bus_zone_with_boundary_buses(
net, boundary_bus_zones, eq_type=eq_type, separate_eqs=separate_eqs, **kwargs)
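# A hedged usage sketch (assumes net.bus.zone is already filled with zone labels):
# nets_ib, boundary_buses, boundary_branches = split_grid_by_bus_zone(
#     net, eq_type="load", separate_eqs=True)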
def get_boundaries_by_bus_zone_with_boundary_branches(net):
"""
Only in_service branches and closed switches are considered.
INPUT:
**net** - pandapower net - In net.bus.zone different zones must be given.
OUTPUT:
**boundary_buses** - dict of boundary buses - for each zone the internal and external
boundary buses are given as well as the external boundary buses for each other zone.
    Furthermore the value of the boundary_buses key "all" includes all boundary buses of all
zones.
Example:
{"all": {0, 1, 3},
1: {"all": {0, 1, 3},
"external": {3},
"internal": {0, 1},
2: {3}
},
2: {"all": {1, 2, 3, 4},
"external": {0, 1, 4},
"internal": {2, 3},
1: {0, 1},
3: {4}
},
3: {"all": {2, 4},
"external": {2},
"internal": {4},
2: {2}
}
}
**boundary_branches** - dict of branch elements - for each zone a set of the corresponding
    boundary branches as well as "all" boundary branches
Example:
{"all": {"line": {0, 1},
"trafo": {0}
},
1: {"line": {0},
"trafo": {0}
},
2: {"line": {0, 1}},
3: {"line": {1}}
}
"""
def append_boundary_buses_externals_per_zone(boundary_buses, boundaries, zone, other_zone_cols):
""" iterate throw all boundaries which matches this_zone and add the other_zone_bus to
boundary_buses """
for idx, ozc in other_zone_cols.iteritems():
other_zone = boundaries[zone_cols].values[idx, ozc]
if isinstance(other_zone, np.generic):
other_zone = other_zone.item()
if zone == other_zone:
continue # this happens if 2 trafo3w connections are in zone
other_zone_bus = boundaries[buses].values[idx, ozc]
append_set_to_dict(boundary_buses, {other_zone_bus}, [zone, other_zone])
if "all" in set(net.bus.zone.values):
raise ValueError("'all' is not a proper zone name.") # all is used later for other purpose
branch_elms = pp.pp_elements(bus=False, bus_elements=False, branch_elements=True,
other_elements=False, res_elements=False)
branch_tuples = pp.element_bus_tuples(bus_elements=False, branch_elements=True,
res_elements=False) | {("switch", "element")}
branch_dict = {branch_elm: [] for branch_elm in branch_elms}
for elm, bus in branch_tuples:
branch_dict[elm] += [bus]
zones = net.bus.zone.unique()
boundary_branches = {zone if net.bus.zone.dtype == object else zone.item():
dict() for zone in zones}
boundary_branches["all"] = dict()
boundary_buses = {zone if net.bus.zone.dtype == object else zone.item():
{"all": set(), "internal": set(), "external": set()} for zone in zones}
boundary_buses["all"] = set()
for elm, buses in branch_dict.items():
idx = net[elm].index[net[elm].in_service] if elm != "switch" else net.switch.index[
(net.switch.et == "b") & net.switch.closed]
boundaries = deepcopy(net[elm][buses].loc[idx]) # copy relevant info from net[elm]
zone_cols = list()
for i, bus_col in enumerate(buses):
# add to which zones the connected buses belong to
boundaries["zone%i" % i] = net.bus.zone.loc[boundaries[bus_col].values].values
zone_cols.append("zone%i" % i)
# compare the zones and conclude if the branches can be boundaries
if i > 0:
boundaries["is_boundary"] |= boundaries["zone%i" % i] != boundaries["zone0"]
else:
boundaries["is_boundary"] = False
# reduce the DataFrame 'boundaries' to those branches which actually are boundaries
boundaries = boundaries.loc[boundaries["is_boundary"],
boundaries.columns.difference(["is_boundary"])]
# determine boundary_branches and boundary_buses
if len(boundaries):
boundary_branches["all"][elm] = set(boundaries.index)
boundary_buses["all"] |= set(boundaries[buses].values.flatten())
for zone in set(boundaries[zone_cols].values.flatten()):
# determine which columns belong to this zone and which not
this_zone_col = np.zeros(boundaries.shape[0]) * np.nan
for i, _ in enumerate(buses):
this_zone_col[boundaries[zone_cols[i]] == zone] = i
this_zone_col = pd.Series(this_zone_col).dropna().astype(int)
other_zone_col1 = pd.Series(np.ones(this_zone_col.shape, dtype=int),
index=this_zone_col.index) - this_zone_col
if len(buses) == 3:
other_zone_col1.loc[other_zone_col1 < 0] = 0
other_zone_col2 = pd.Series(3 * np.ones(this_zone_col.shape, dtype=int),
index=this_zone_col.index) - \
this_zone_col - other_zone_col1
# fill zone dependant values to boundary_branches and boundary_buses
boundary_branches[zone][elm] = set(boundaries.index[this_zone_col.index])
nint = set(boundaries[buses].values[this_zone_col.index, this_zone_col.values])
ext = set(boundaries[buses].values[other_zone_col1.index, other_zone_col1.values])
boundary_buses[zone]["internal"] |= nint
boundary_buses[zone]["external"] |= ext
boundary_buses[zone]["all"] |= ext | nint
if len(buses) == 3:
ext = set(boundaries[buses].values[
other_zone_col2.index, other_zone_col2.values])
boundary_buses[zone]["external"] |= ext
boundary_buses[zone]["all"] |= ext
append_boundary_buses_externals_per_zone(
boundary_buses, boundaries, zone, other_zone_col1)
if len(buses) == 3:
append_boundary_buses_externals_per_zone(
boundary_buses, boundaries, zone, other_zone_col2)
# check for missing zone connections
zones_without_connection = list()
for zone, bra in boundary_branches.items():
if zone != "all" and not bra:
zones_without_connection.append(zone)
if len(zones_without_connection):
logger.warning("These zones have no connections to other zones: " + str(
zones_without_connection))
return boundary_buses, boundary_branches
def nets_ib_by_bus_zone_with_boundary_branches(
net, eq_type=None, separate_eqs=True, duplicated_boundary_bus_elements=True, **kwargs):
"""
INPUT:
**net** - pandapower net - In net.bus.zone different zones must be given.
OPTIONAL:
**eq_type** (str, None) - If given, equivalent elements are added to the boundaries
**separate_eqs** (bool, True) - If True and if eq_type is not None, subnets with
        different zones are represented by individual equivalents
**duplicated_boundary_bus_elements** (bool, True) - if True, bus elements at boundary buses
are included in all connected nets_ib. If False, only the first nets_ib includes these bus
elements.
OUTPUT:
**nets_ib** - dict of pandapower nets - the internal grid and boundary grid of each zone
as far as the external boundary buses
**boundary_buses** - dict of boundary buses - for each zone the internal and external
    boundary buses are given. Furthermore the value of the boundary_buses key "all" includes
all boundary buses of all zones
**boundary_branches** - dict of branch elements - for each zone a set of the corresponding
    boundary branches as well as "all" boundary branches
"""
eq_type = eq_type if not isinstance(eq_type, str) else eq_type.lower()
if eq_type is not None and eq_type not in ["load", "gen", "rei", "ward", "xward"]:
raise ValueError("eq_type %s is unknown." % str(eq_type))
nets_ib = dict() # internal + boundary: the internal and up to the external boundary buses
# are included
boundary_buses, boundary_branches = get_boundaries_by_bus_zone_with_boundary_branches(net)
# --- define nets_ib with consideration of eq_type (very similar to part of
# split_grid_by_bus_zone_with_boundary_buses())
n_add_elm1 = 1
fully_included_boundary_buses = set()
for zone in boundary_buses.keys():
if zone == "all":
continue
this_zone_buses = set(net.bus.index[net.bus.zone == zone])
# direct calculation of nets_ib and continue
if eq_type in ["rei", "ward", "xward"] and not separate_eqs:
nets_ib[zone] = get_equivalent(
net, eq_type, boundary_buses=boundary_buses[zone]["external"],
internal_buses=this_zone_buses, elm_col=kwargs.get("elm_col", None))
continue
# all other cases comes here: ...
nets_ib[zone] = pp.select_subnet(
net, this_zone_buses.union(boundary_buses[zone]["external"]), include_results=True)
nets_ib[zone]["bus"].sort_index(inplace=True)
if not duplicated_boundary_bus_elements:
bb2dr = fully_included_boundary_buses & boundary_buses[zone]["internal"]
pp.drop_elements_at_buses(nets_ib[zone], bb2dr, branch_elements=False)
fully_included_boundary_buses |= boundary_buses[zone]["internal"]
# drop all elements at external boundary buses
pp.drop_inner_branches(nets_ib[zone], boundary_buses[zone]["external"])
for elm in pp.pp_elements(bus=False, branch_elements=False, other_elements=False):
pp.drop_elements_at_buses(nets_ib[zone], boundary_buses[zone]["external"],
branch_elements=False, drop_measurements=False)
if eq_type is None:
continue
elif eq_type in ["load", "gen"] and not separate_eqs:
create_eq_fct = {"load": create_eq_loads, "gen": create_eq_gens}[eq_type]
eq_elms = create_eq_fct(nets_ib[zone], boundary_buses[zone]["external"],
idx_start=net[eq_type].index.values.max() + n_add_elm1,
zone=zone)
n_add_elm1 += len(eq_elms)
if group_imported:
elm_col = kwargs.get("elm_col", None)
if elm_col is not None:
eq_elms = nets_ib[zone][eq_type][elm_col].loc[eq_elms]
Group(nets_ib[zone], {eq_type: eq_elms}, name=eq_name(eq_type, zone=zone),
elm_col=elm_col)
continue
if not separate_eqs:
logger.error("With separate_eqs is False, this point should not be"
"reached. Hereafter other_zones are iterated")
continue
for other_zone, bb in boundary_buses[zone].items():
if other_zone in ["all", "internal", "external"]:
continue
if eq_type in ["load", "gen"] and separate_eqs:
create_eq_fct = {"load": create_eq_loads, "gen": create_eq_gens}[eq_type]
eq_elms = create_eq_fct(nets_ib[zone], bb,
idx_start=net[eq_type].index.values.max() + n_add_elm1,
zone=zone, other_zone=other_zone)
n_add_elm1 += len(eq_elms)
if group_imported:
elm_col = kwargs.get("elm_col", None)
if elm_col is not None:
eq_elms = nets_ib[zone][eq_type][elm_col].loc[eq_elms]
Group(nets_ib[zone], {eq_type: eq_elms},
name=eq_name(eq_type, other_zone, zone), elm_col=elm_col)
elif eq_type in ["rei", "ward", "xward"] and separate_eqs:
                raise NotImplementedError("eq_type '%s' with separate_eqs=%s is not implemented" %
(eq_type, str(separate_eqs)))
else:
raise NotImplementedError("This else should not be reached!")
return nets_ib, boundary_buses, boundary_branches
def split_grid_by_bus_zone_with_boundary_buses(
net, boundary_bus_zones, eq_type=None, separate_eqs=True, only_connected_groups=False,
duplicated_boundary_bus_elements=True, **kwargs):
"""
INPUT:
**net** (pandapower net) - In net.bus.zone different zones must be given.
**boundary_bus_zones** - values in net.bus.zone which are to be considered as boundary
buses
OPTIONAL:
**eq_type** (str, None) - If given, equivalent elements are added to the boundaries
**separate_eqs** (bool, True) - If True and if eq_type is not None, subnets with
        different zones are represented by individual equivalents
**only_connected_groups** (bool, False) - if True, an error is raised if buses of the same
zone are not directly connected
**duplicated_boundary_bus_elements** (bool, True) - if True, bus elements at boundary buses
are included in all connected nets_ib. If False, only the first nets_ib includes these bus
elements.
OUTPUT:
**nets_ib** - dict of pandapower nets - the internal grid and boundary grid of each zone
as far as the external boundary buses
**boundary_buses** - dict of boundary buses - for each zone the external (always equal to
all - since no internal boundaries are considered here)
boundary buses are given as well as the external boundary buses for each other zone.
    Furthermore the value of the boundary_buses key "all" includes all boundary buses of all
zones.
Example:
{"all": {0, 1, 3},
1: {"all": {3},
"external": {3},
2: {3}
},
2: {"all": {0, 1, 4},
"external": {0, 1, 4},
1: {0, 1},
3: {4}
},
3: {"all": {2},
"external": {2},
2: {2}
}
}
**boundary_branches** - empty dict
"""
if is_string_dtype(net.bus["zone"]) and "all" in net.bus.zone.values:
raise ValueError("'all' is not a proper zone name.") # all is used later for other purpose
eq_type = eq_type if not isinstance(eq_type, str) else eq_type.lower()
if eq_type is not None and eq_type not in ["load", "gen", "rei", "ward", "xward"]:
raise ValueError("eq_type %s is unknown." % str(eq_type))
boundary_bus_zones = pp.ensure_iterability(boundary_bus_zones)
# initialize boundary_buses and nets_ib (and boundary_branches)
boundary_buses = {"all": set(net.bus.index[net.bus.zone.isin(boundary_bus_zones)])}
boundary_branches = dict()
nets_ib_buses = dict()
nets_ib = dict()
# create topology graphs and bus groups
mg = top.create_nxgraph(net, nogobuses=boundary_buses["all"])
cc = top.connected_components(mg)
# --- check validity + fill nets_ib_buses and boundary_buses
for bus_group in cc:
zones = net.bus.zone.loc[bus_group].unique()
if len(zones) > 1:
raise ValueError("These zones exist in a group of " + str(len(bus_group)) +
" connected bus which should have only one zone: " + str(zones))
else:
zone = zones[0] if net.bus.zone.dtype == object else zones[0].item()
conn_buses = pp.get_connected_buses(net, bus_group)
conn_other_zone_buses = conn_buses - boundary_buses["all"]
# raise if other buses than boundary_buses["all"] are boundaries
if len(conn_other_zone_buses) > 10:
raise ValueError(str(len(conn_other_zone_buses)) + " buses are connected to zone " +
str(zone) + " buses although they are no boundary buses." % zone)
elif len(conn_other_zone_buses) > 0:
raise ValueError("Theses buses are connected to zone " + str(
zone) + " buses although they are no boundary buses: " + str(conn_other_zone_buses))
if zone in boundary_buses.keys(): # buses of this zone has already been considered in
# boundary_buses by another bus_group -> same zone without connection
message = "Zone " + str(zone) + " exist in multiple bus groups. These are the" + \
" zones of the bus groups: " + str(zones)
if only_connected_groups:
raise ValueError(message)
else:
logger.warning(message)
# fill nets_ib_buses
append_set_to_dict(nets_ib_buses, bus_group | conn_buses, [zone])
# fill boundary_buses[zone]["all"] and boundary_buses[zone]["external"] which is the same
append_set_to_dict(boundary_buses, conn_buses, [zone, "all"])
boundary_buses[zone]["external"] = boundary_buses[zone]["all"]
# fill boundary_buses[zone1][zone2]
for zone1 in nets_ib_buses.keys():
for zone2 in nets_ib_buses.keys():
if zone1 == zone2:
continue
overlap = boundary_buses[zone1]["all"] & boundary_buses[zone2]["all"]
if len(overlap):
append_set_to_dict(boundary_buses, overlap, [zone1, zone2])
append_set_to_dict(boundary_buses, overlap, [zone2, zone1])
# --- define nets_ib with consideration of eq_type (very similar to part of
# split_grid_by_bus_zone_with_boundary_branches())
n_add_elm1 = 1
fully_included_boundary_buses = set()
for zone, buses in nets_ib_buses.items():
if eq_type in ["rei", "ward", "xward"] and not separate_eqs:
nets_ib[zone] = get_equivalent(
net, eq_type, boundary_buses=boundary_buses[zone]["all"],
internal_buses=net.bus.index[net.bus.zone == zone],
elm_col=kwargs.get("elm_col", None))
continue
nets_ib[zone] = pp.select_subnet(net, buses, include_results=True)
nets_ib[zone]["bus"].sort_index(inplace=True)
if not duplicated_boundary_bus_elements:
bb2dr = fully_included_boundary_buses & boundary_buses[zone]["all"]
pp.drop_elements_at_buses(nets_ib[zone], bb2dr, branch_elements=False)
fully_included_boundary_buses |= boundary_buses[zone]["all"]
if eq_type is None:
continue
elif eq_type in ["load", "gen"] and not separate_eqs:
create_eq_fct = {"load": create_eq_loads, "gen": create_eq_gens}[eq_type]
eq_elms = create_eq_fct(nets_ib[zone], boundary_buses[zone]["all"],
idx_start=net[eq_type].index.values.max() + n_add_elm1,
zone=zone)
n_add_elm1 += len(eq_elms)
if group_imported:
elm_col = kwargs.get("elm_col", None)
if elm_col is not None:
eq_elms = nets_ib[zone][eq_type][elm_col].loc[eq_elms]
Group(nets_ib[zone], {eq_type: eq_elms}, name=eq_name(eq_type, zone=zone),
elm_col=elm_col)
continue
if not separate_eqs:
logger.error("With separate_eqs is False, this point should not be"
"reached. Hereafter other_zones are iterated")
continue
for other_zone, bb in boundary_buses[zone].items():
if other_zone in ["all", "external", "internal"]:
continue
if eq_type in ["load", "gen"] and separate_eqs:
create_eq_fct = {"load": create_eq_loads, "gen": create_eq_gens}[eq_type]
eq_elms = create_eq_fct(nets_ib[zone], bb,
idx_start=net[eq_type].index.values.max() + n_add_elm1,
zone=zone, other_zone=other_zone)
n_add_elm1 += len(eq_elms)
if group_imported:
elm_col = kwargs.get("elm_col", None)
if elm_col is not None:
eq_elms = nets_ib[zone][eq_type][elm_col].loc[eq_elms]
Group(nets_ib[zone], {eq_type: eq_elms},
name=eq_name(eq_type, other_zone, zone),
elm_col=elm_col)
elif eq_type in ["rei", "ward", "xward"] and separate_eqs:
                raise NotImplementedError("eq_type '%s' with separate_eqs=%s is not implemented" %
(eq_type, str(separate_eqs)))
else:
raise NotImplementedError("This else should not be reached!")
return nets_ib, boundary_buses, boundary_branches
def split_grid_by_bus_zone_with_boundary_branches(net, **kwargs):
"""
INPUT:
**net** - pandapower net - In net.bus.zone different zones must be given.
OPTIONAL:
****kwargs** - with_oos_eq_loads (bool)
OUTPUT:
**boundary_buses** - dict of boundary buses - for each zone the internal and external
    boundary buses are given. Furthermore the value of the boundary_buses key "all" includes
all boundary buses of all zones
**boundary_branches** - dict of branch elements - for each zone a set of the corresponding
    boundary branches as well as "all" boundary branches
**nets_i** - dict of pandapower nets - the internal grid of each zone as far as the
internal boundary buses
**nets_ib** - dict of pandapower nets - the internal grid and boundary grid of each zone
as far as the external boundary buses
**nets_ib0** - dict of pandapower nets - same as nets_ib but with elements at the external
boundary buses
**nets_ib_eq_load** - dict of pandapower nets - similar to nets_ib but with equivalent
loads at the external boundary buses instead of original elements at the external boundary
buses
**nets_b** - dict of pandapower nets - the boundary grid connected to each zone
"""
if "all" in set(net.bus.zone.values):
raise ValueError("'all' is not a proper zone name.") # all is used later for other purpose
boundary_buses, boundary_branches = get_boundaries_by_bus_zone_with_boundary_branches(net)
nets_i = dict()
nets_ib = dict()
nets_ib0 = dict()
nets_ib_eq_load = dict()
nets_b = dict()
n_add_load1 = 1
n_add_load2 = 1
for zone in boundary_buses.keys():
if zone == "all":
continue
this_zone_buses = set(net.bus.index[net.bus.zone == zone])
# --- get splitted grids
# nets_i (only the internal net, no boundary buses and branches)
nets_i[zone] = pp.select_subnet(net, this_zone_buses, include_results=True)
nets_i[zone]["bus"].sort_index(inplace=True)
if kwargs.get("with_oos_eq_loads", False):
create_eq_loads(nets_i[zone], boundary_buses[zone]["internal"],
idx_start=net.load.index.values.max() + n_add_load2,
zone=zone, in_service=False)
n_add_load2 += (nets_i[zone].load.name == "equivalent load").sum()
# nets_ib (the internal and the boundary branch including the external boundary buses and
# their bus elements)
nets_ib[zone] = pp.select_subnet(
net, this_zone_buses.union(boundary_buses[zone]["external"]), include_results=True)
nets_ib[zone]["bus"].sort_index(inplace=True)
pp.drop_inner_branches(nets_ib[zone], boundary_buses[zone]["external"])
# nets_ib0 (as nets_ib but without the bus elements at the external boundary buses)
nets_ib0[zone] = deepcopy(nets_ib[zone])
pp.drop_elements_at_buses(nets_ib0[zone], boundary_buses[zone]["external"],
branch_elements=False)
# nets_ib_eq_load (as nets_ib0 but with equivalent loads at the external boundary buses)
# -> used in decomp approach
nets_ib_eq_load[zone] = deepcopy(nets_ib0[zone])
create_eq_loads(nets_ib_eq_load[zone], boundary_buses[zone]["external"],
boundary_branches[zone], zone=zone,
idx_start=net.load.index.values.max() + n_add_load1)
n_add_load1 += (nets_ib_eq_load[zone].load.name == "equivalent load").sum()
# nets_b (only the boundary branches)
nets_b[zone] = deepcopy(nets_ib[zone])
full_drop_buses = nets_i[zone].bus.index.difference(boundary_buses["all"])
simple_drop_buses = nets_i[zone].bus.index.intersection(boundary_buses["all"])
pp.drop_buses(nets_b[zone], full_drop_buses, drop_elements=True)
pp.drop_buses(nets_b[zone], simple_drop_buses, drop_elements=False)
drop_internal_branch_elements(nets_b[zone], simple_drop_buses)
pp.drop_elements_at_buses(nets_b[zone], simple_drop_buses, branch_elements=False)
return boundary_buses, boundary_branches, nets_i, nets_ib, nets_ib0, nets_ib_eq_load, nets_b
def get_bus_lookup_by_name(net, bus_lookup_by_name):
return {net.bus.index[net.bus.name == name][0]: idx for name, idx in
bus_lookup_by_name.items() if name in net.bus.name.values}
def dict_sum_value(dict1, dict2):
"""
Return a dict with the sum of values of both input dicts.
"""
output = deepcopy(dict1)
dict2 = deepcopy(dict2)
for key in set(dict2.keys()) & set(output.keys()):
output[key] += dict2[key]
del dict2[key] # to not overwrite the new output values by dict2 in the line
# "output.update(dict2)"
output.update(dict2) # include all values of dict2 which keys are not in dict1
return output
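# A minimal sketch of dict_sum_value (illustrative values):
def _demo_dict_sum_value():
    return dict_sum_value({"a": 1, "b": 2}, {"b": 3, "c": 4})  # {"a": 1, "b": 5, "c": 4}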
def is_res_table_with_same_idx(net, elm):
return "res_" + elm in net.keys() and \
isinstance(net["res_" + elm], pd.DataFrame) and \
net["res_" + elm].shape[0] == net[elm].shape[0] and \
all(net["res_" + elm].index == net[elm].index)
def _sort_from_to_buses(net, elm, idx):
# determine from and to columns to switch
from_cols = [col for col in net[elm].columns if "from_" in col]
to_cols = [col.replace("from", "to") for col in from_cols]
if elm == "impedance":
ft_cols = [col for col in net[elm].columns if "ft_" in col]
tf_cols = [col.replace("ft_", "tf_") for col in ft_cols]
from_cols += ft_cols
to_cols += tf_cols
# for every column which includes "from_", there must be a counterpart:
assert not set(to_cols) - set(net[elm].columns)
# sort:
net[elm].loc[idx, from_cols + to_cols] = net[elm].loc[idx, to_cols + from_cols].values
def sort_from_to_buses(net, elm):
""" sorts the given element table by from_bus and to_bus columns. """
if net[elm].shape[0]:
cols = ["from_bus", "to_bus"]
assert not set(cols) - set(net[elm].columns)
        idx_to_sort = net[elm].index.values[np.argsort(net[elm][cols].values, axis=1)  # api: numpy.argsort
import os
import pandas as pd
import numpy as np
from collections import Counter, defaultdict
def train_from_file(dir_path, leap_limit=15):
file_list = os.listdir(dir_path)
pig_format = [
"id",
"onset",
"offset",
"pitch",
"onsetvel",
"offsetvel",
"hand",
"fingernum",
]
right_init = Counter()
right_transition_count = Counter()
right_emission = defaultdict(Counter)
left_init = Counter()
left_transition_count = Counter()
left_emission = defaultdict(Counter)
for idx, file in enumerate(file_list):
path = dir_path + "/" + file
data_size = len(file_list)
print(f"Processing: {path} ({idx + 1}/{data_size})")
data = pd.read_csv(path, sep="\t", header=0, names=pig_format)
if data.fingernum.dtype == object:
data.fingernum = data.fingernum.apply(
lambda x: x.split("_")[0]
).astype("int")
left_hand = data[data.fingernum < 0]
right_hand = data[data.fingernum > 0]
init, transition, emission = count_fingering(
right_hand, limit=leap_limit
)
right_init += init
right_transition_count += transition
for k, counter in emission.items():
right_emission[k].update(counter)
init, transition, emission = count_fingering(
left_hand, limit=leap_limit
)
left_init += init
left_transition_count += transition
for k, counter in emission.items():
left_emission[k].update(counter)
return (right_init, right_transition_count, right_emission, left_init, left_transition_count, left_emission)
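# A hedged usage sketch: "FingeringFiles" is a hypothetical directory of PIG-format
# files; the returned Counters feed the *_to_prob helpers defined below.
# (r_init, r_trans, r_emit,
#  l_init, l_trans, l_emit) = train_from_file("FingeringFiles", leap_limit=15)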
def pitch_to_key(pitch: str):
posx = {"C": 0, "D": 1, "E": 2, "F": 3, "G": 4, "A": 5, "B": 6}[pitch[0]]
posy = 0
if pitch[1].isdigit():
posx += (int(pitch[1]) - 4) * 7
elif pitch[1] == "#":
if pitch[2] == "#":
posx += (int(pitch[3]) - 4) * 7 + 1
else:
posy = 1
posx += (int(pitch[2]) - 4) * 7
elif pitch[1] == "b" or pitch[1] == "-":
if pitch[2] == "b" or pitch[2] == "-":
posx += (int(pitch[3]) - 4) * 7
else:
posy = 1
posx += (int(pitch[2]) - 4) * 7 - 1
return (posx, posy)
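# A minimal sketch of the key-coordinate mapping (illustrative pitches):
def _demo_pitch_to_key():
    assert pitch_to_key("C4") == (0, 0)    # reference white key
    assert pitch_to_key("C#4") == (0, 1)   # black key: posy == 1
    assert pitch_to_key("D5") == (8, 0)    # 7 diatonic steps per octave + 1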
def note_to_diff(fingering_data, limit=15):
pos_x, pos_y = zip(*fingering_data.pitch.map(pitch_to_key))
series_x = pd.Series(pos_x)
series_y = pd.Series(pos_y)
diffs = list(
zip(
series_x.diff()
.fillna(0, downcast="infer")
.apply(lambda x: limit if x > limit else x)
.apply(lambda x: -limit if x < -limit else x),
series_y.diff().fillna(0, downcast="infer"),
)
)
return diffs
def count_fingering(fingering_data, limit=15):
hidden_state = list(
zip(
fingering_data.fingernum.shift(fill_value=0),
fingering_data.fingernum,
)
)
pos_x, pos_y = zip(*fingering_data.pitch.map(pitch_to_key))
model = pd.DataFrame(
{"hidden_state": hidden_state, "pos_x": pos_x, "pos_y": pos_y}
)
model["pos_diff"] = list(
zip(
model.pos_x.diff()
.fillna(0, downcast="infer")
.apply(lambda x: limit if x > limit else x)
.apply(lambda x: -limit if x < -limit else x),
model.pos_y.diff().fillna(0, downcast="infer"),
)
)
# First observation only
init = Counter([model.hidden_state[0][1]])
# Without first observation
transition = Counter(model.hidden_state[1:])
# Emission
emission = {
state: Counter(model[model.hidden_state == state].pos_diff)
for state in set(model.hidden_state[1:])
}
return (init, transition, Counter(emission))
def normalize(v):
return v / v.sum(axis=0)
def init_count_to_prob(init_count):
init_prob = np.zeros(5)
for key, value in init_count.items():
if key < 0:
init_prob[-key - 1] = value
else:
init_prob[key - 1] = value
return normalize(init_prob)
def transition_count_to_prob(transition_count):
transition_prob = np.zeros((5, 5))
for key, value in transition_count.items():
if key[0] < 0 and key[1] < 0:
transition_prob[-key[0] - 1, -key[1] - 1] = value
else:
transition_prob[key[0] - 1, key[1] - 1] = value
return np.apply_along_axis(normalize, axis=1, arr=transition_prob)
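# A minimal sketch (illustrative counts) of turning finger counts into probabilities:
def _demo_count_to_prob():
    init = init_count_to_prob(Counter({1: 3, 2: 1}))  # [0.75, 0.25, 0., 0., 0.]
    # a self-transition count for every finger yields the 5x5 identity matrix
    trans = transition_count_to_prob(Counter({(i, i): 1 for i in range(1, 6)}))
    return init, trans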
def series_to_matrix(emission_prob):
out_prob = np.zeros((5, 5))
for key, value in emission_prob.items():
if key[0] < 0 and key[1] < 0:
out_prob[-key[0] - 1, -key[1] - 1] = value
else:
out_prob[key[0] - 1, key[1] - 1] = value
return out_prob
def emission_count_to_prob(emission_count):
prob_df = (
pd.DataFrame.from_dict(emission_count).fillna(0, downcast="infer") + 1
).apply(normalize, axis=0)
prob_dict = {
out: series_to_matrix(prob_df.loc[out]) for out in prob_df.index
}
return prob_dict
def decoding(init_prob, transition, out_prob, observations, hand):
n_state = len(init_prob)
obs_len = len(observations)
delta = np.zeros((n_state, obs_len + 1))
psi = np.zeros((n_state, obs_len), dtype=int)
delta[:, 0] = np.log(init_prob)
for i, (pitch, time) in enumerate(
zip(observations.pitch_diff, observations.time_diff)
):
delta_mat = np.tile(delta[:, i], (n_state, 1)).transpose()
prod = delta_mat + np.log(transition) + np.log(out_prob[pitch])
if time < 0.03:
if hand == "R":
if pitch[0] > 0:
                    prod[np.tril_indices(n_state)]  # api: numpy.tril_indices
import numpy as np
import os
import glob
import cv2
import geoio
import tifffile as tiff
flood_map_dir = "D:\\Workspace\\results\\pisar\\scences\\flood_mask_post"
geo_dir = "D:\\Workspace\\data\\raw\\pi-sar2\\20110312\\tiff_all"
dem_path = "D:\\Workspace\\data\\raw\\pi-sar2\\20110312\\dem.tif"
save_dir = "D:\\Workspace\\results\\pisar\\scences\\dem"
dem_img = geoio.GeoImage(dem_path)
dem_data = dem_img.get_data() # (bands, rows, cols)
for filepath in glob.glob(os.path.join(flood_map_dir, "*.png")):
basename = os.path.basename(filepath).split(".")[0]
flood_img = cv2.imread(filepath, 0)
flood_dem_img = np.zeros(flood_img.shape + (1,))
geo_path = os.path.join(geo_dir, "%s_sc.tif" % basename)
geo_img = geoio.GeoImage(geo_path)
    y = np.arange(flood_img.shape[0])  # api: numpy.arange
from scipy import misc
import os
import time
import numpy as np
import tensorflow as tf
import random
import matplotlib.pyplot as plt
import matplotlib as mp
# --------------------------------------------------
# setup
def weight_variable(shape):
'''
Initialize weights
:param shape: shape of weights, e.g. [w, h ,Cin, Cout] where
w: width of the filters
h: height of the filters
Cin: the number of the channels of the filters
Cout: the number of filters
:return: a tensor variable for weights with initial values
'''
# IMPLEMENT YOUR WEIGHT_VARIABLE HERE
initial = tf.truncated_normal(shape, stddev=0.1)
W = tf.Variable(initial, name="W")
return W
def bias_variable(shape):
'''
Initialize biases
:param shape: shape of biases, e.g. [Cout] where
Cout: the number of filters
:return: a tensor variable for biases with initial values
'''
# IMPLEMENT YOUR BIAS_VARIABLE HERE
initial = tf.constant(0.1, shape=shape)
b = tf.Variable(initial, name="b")
return b
def conv2d(x, W):
'''
Perform 2-D convolution
:param x: input tensor of size [N, W, H, Cin] where
N: the number of images
W: width of images
H: height of images
Cin: the number of channels of images
:param W: weight tensor [w, h, Cin, Cout]
w: width of the filters
h: height of the filters
Cin: the number of the channels of the filters = the number of channels of images
Cout: the number of filters
:return: a tensor of features extracted by the filters, a.k.a. the results after convolution
'''
# IMPLEMENT YOUR CONV2D HERE
h_conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
return h_conv
def max_pool_2x2(x):
'''
Perform non-overlapping 2-D maxpooling on 2x2 regions in the input data
:param x: input data
:return: the results of maxpooling (max-marginalized + downsampling)
'''
# IMPLEMENT YOUR MAX_POOL_2X2 HERE
h_max = tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
return h_max
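# A hedged sketch (hypothetical shapes) of one conv + pool stage built from the
# helpers above, TF1-style as in the rest of this file:
# x = tf.placeholder(tf.float32, [None, imsize, imsize, nchannels])
# W1 = weight_variable([5, 5, nchannels, 32]); b1 = bias_variable([32])
# h1 = tf.nn.relu(conv2d(x, W1) + b1)   # -> [N, imsize, imsize, 32]
# p1 = max_pool_2x2(h1)                 # -> [N, imsize/2, imsize/2, 32]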
ntrain = 1000 # per class
ntest = 100 # per class
nclass = 10 # number of classes
imsize = 28
nchannels = 1
batchsize = 100
# Load training and test images
Train = np.zeros((ntrain*nclass,imsize,imsize,nchannels))  # api: numpy.zeros
import gym
import jax
import jax.numpy as jnp
import haiku as hk
import numpy as np
from tqdm.auto import trange
from collections import deque
from haiku_baselines.common.base_classes import TensorboardWriter, save, restore, select_optimizer
#from haiku_baselines.common.buffers import ReplayBuffer, PrioritizedReplayBuffer, EpisodicReplayBuffer, PrioritizedEpisodicReplayBuffer
from haiku_baselines.common.cpprb_buffers import ReplayBuffer, PrioritizedReplayBuffer
from haiku_baselines.common.utils import convert_states
from haiku_baselines.common.worker import gymMultiworker
from mlagents_envs.environment import UnityEnvironment, ActionTuple
class Deteministic_Policy_Gradient_Family(object):
def __init__(self, env, gamma=0.995, learning_rate=5e-5, buffer_size=50000, train_freq=1, gradient_steps=1, batch_size=32,
n_step = 1, learning_starts=1000, target_network_update_tau=5e-4, prioritized_replay=False,
prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_eps=1e-6,
log_interval=200, tensorboard_log=None, _init_setup_model=True, policy_kwargs=None,
full_tensorboard_log=False, seed=None, optimizer = 'adamw'):
self.env = env
self.log_interval = log_interval
self.policy_kwargs = policy_kwargs
self.seed = 42 if seed is None else seed
self.key_seq = hk.PRNGSequence(self.seed)
self.learning_starts = learning_starts
self.train_freq = train_freq
self.gradient_steps = gradient_steps
self.prioritized_replay = prioritized_replay
self.prioritized_replay_eps = prioritized_replay_eps
self.batch_size = batch_size
self.target_network_update_tau = target_network_update_tau
self.prioritized_replay_alpha = prioritized_replay_alpha
self.prioritized_replay_beta0 = prioritized_replay_beta0
self.buffer_size = buffer_size
self.learning_rate = learning_rate
self.gamma = gamma
self._gamma = self.gamma**n_step #n_step gamma
self.tensorboard_log = tensorboard_log
self.full_tensorboard_log = full_tensorboard_log
self.n_step_method = (n_step > 1)
self.n_step = n_step
self.params = None
self.target_params = None
self.save_path = None
self.optimizer = select_optimizer(optimizer,self.learning_rate,1e-2/self.batch_size)
self.get_env_setup()
self.get_memory_setup()
def save_params(self, path):
save(path, self.params)
def load_params(self, path):
self.params = self.target_params = restore(path)
def get_env_setup(self):
print("----------------------env------------------------")
if isinstance(self.env,UnityEnvironment):
print("unity-ml agent environmet")
self.env.reset()
group_name = list(self.env.behavior_specs.keys())[0]
group_spec = self.env.behavior_specs[group_name]
self.env.step()
dec, term = self.env.get_steps(group_name)
self.group_name = group_name
self.observation_space = [list(spec.shape) for spec in group_spec.observation_specs]
self.action_size = [group_spec.action_spec.continuous_size]
self.worker_size = len(dec.agent_id)
self.env_type = "unity"
elif isinstance(self.env,gym.Env) or isinstance(self.env,gym.Wrapper):
print("openai gym environmet")
action_space = self.env.action_space
observation_space = self.env.observation_space
self.observation_space = [list(observation_space.shape)]
self.action_size = [action_space.shape[0]]
self.worker_size = 1
self.env_type = "gym"
elif isinstance(self.env,gymMultiworker):
print("gymMultiworker")
env_info = self.env.env_info
self.observation_space = [list(env_info['observation_space'].shape)]
self.action_size = [env_info['action_space'].n]
self.worker_size = self.env.worker_num
self.env_type = "gymMultiworker"
print("observation size : ", self.observation_space)
print("action size : ", self.action_size)
print("worker_size : ", self.worker_size)
print("-------------------------------------------------")
def get_memory_setup(self):
'''
if self.prioritized_replay:
if self.n_step_method:
self.replay_buffer = PrioritizedEpisodicReplayBuffer(self.buffer_size,self.observation_space, self.worker_size, self.action_size[0],
self.n_step, self.gamma, self.prioritized_replay_alpha)
else:
self.replay_buffer = PrioritizedReplayBuffer(self.buffer_size,self.observation_space, self.worker_size, self.action_size[0],
self.prioritized_replay_alpha)
else:
if self.n_step_method:
self.replay_buffer = EpisodicReplayBuffer(self.buffer_size,self.observation_space, self.worker_size, self.action_size[0], self.n_step, self.gamma)
else:
self.replay_buffer = ReplayBuffer(self.buffer_size,self.observation_space, self.worker_size, self.action_size[0])
'''
if not self.prioritized_replay:
self.replay_buffer = ReplayBuffer(self.buffer_size,self.observation_space, self.worker_size, self.action_size, self.n_step, self.gamma)
else:
self.replay_buffer = PrioritizedReplayBuffer(self.buffer_size,self.observation_space,self.prioritized_replay_alpha, self.worker_size, self.action_size, self.n_step, self.gamma)
def setup_model(self):
pass
def _train_step(self, steps):
pass
def _get_actions(self, params, obses) -> np.ndarray:
pass
def actions(self,obs,steps):
pass
def learn(self, total_timesteps, callback=None, log_interval=1000, tb_log_name="Q_network",
reset_num_timesteps=True, replay_wrapper=None):
if self.n_step_method:
tb_log_name = "{}Step_".format(self.n_step) + tb_log_name
if self.prioritized_replay:
tb_log_name = tb_log_name + "+PER"
pbar = trange(total_timesteps, miniters=log_interval)
with TensorboardWriter(self.tensorboard_log, tb_log_name) as (self.summary, self.save_path):
if self.env_type == "unity":
self.learn_unity(pbar, callback, log_interval)
if self.env_type == "gym":
self.learn_gym(pbar, callback, log_interval)
if self.env_type == "gymMultiworker":
self.learn_gymMultiworker(pbar, callback, log_interval)
self.save_params(self.save_path)
def discription(self):
return "score : {:.3f}, loss : {:.3f} |".format(
np.mean(self.scoreque),np.mean(self.lossque)
)
def learn_unity(self, pbar, callback=None, log_interval=100):
self.env.reset()
self.env.step()
dec, term = self.env.get_steps(self.group_name)
self.scores = np.zeros([self.worker_size])
self.eplen = np.zeros([self.worker_size])
self.scoreque = deque(maxlen=10)
self.lossque = deque(maxlen=10)
obses = convert_states(dec.obs)
for steps in pbar:
self.eplen += 1
actions = self.actions(obses,steps)
action_tuple = ActionTuple(continuous=actions)
old_obses = obses
self.env.set_actions(self.group_name, action_tuple)
self.env.step()
if steps > self.learning_starts and steps % self.train_freq == 0: #train in step the environments
loss = self.train_step(steps,self.gradient_steps)
self.lossque.append(loss)
dec, term = self.env.get_steps(self.group_name)
term_ids = list(term.agent_id)
term_obses = convert_states(term.obs)
term_rewards = list(term.reward)
term_done = list(term.interrupted)
while len(dec) == 0:
self.env.step()
dec, term = self.env.get_steps(self.group_name)
if len(term.agent_id) > 0:
term_ids += list(term.agent_id)
newterm_obs = convert_states(term.obs)
                term_obses = [np.concatenate((to, o), axis=0)  # api: numpy.concatenate
import matplotlib.pyplot as plt
import numpy as np
import pyfftw
import scipy.signal as sg
from PIL import Image, ImageDraw
from litho.config import PATH
from litho.gdsii.library import Library
class Mask:
"""
Binary Mask
Args:
x/ymax: for the computing area
x/y_gridsize: the simulated grid size of the area. Different values are supported, e.g. 2nm
CD: used for method poly2mask, 45nm
.. plot::
:include-source:
import matplotlib.pyplot as plt
from litho.config import PATH
from litho.mask import Mask
m = Mask()
m.x_range = [-300.0, 300.0]
m.y_range = [-300.0, 300.0]
m.x_gridsize = 10
m.y_gridsize = 10
m.openGDS(PATH.gdsdir / "AND2_X4.gds", 10)
m.maskfft()
m.smooth()
plt.imshow(
m.data,
extent=(m.x_range[0], m.x_range[1], m.y_range[0], m.y_range[1]),
cmap="hot",
interpolation="none",
)
plt.figure()
plt.imshow(
m.sdata,
extent=(m.x_range[0], m.x_range[1], m.y_range[0], m.y_range[1]),
cmap="hot",
interpolation="none",
)
plt.show()
"""
def __init__(self, xmax=500, ymax=500, x_gridsize=1, y_gridsize=1, CD=45):
self.x_range = [-xmax, xmax] # nm
self.y_range = [-ymax, ymax]
self.x_gridsize = x_gridsize # nm
self.y_gridsize = y_gridsize
self.CD = CD
def poly2mask(self):
"""Get Pixel-based Mask Image from Polygon Data
The polygon data format is sensitive
Similar to poly2mask in Matlab
"""
self.x_gridnum = int((self.x_range[1] - self.x_range[0]) / self.x_gridsize)
self.y_gridnum = int((self.y_range[1] - self.y_range[0]) / self.y_gridsize)
img = Image.new("L", (self.x_gridnum, self.y_gridnum), 0)
self.perimeter = 0.0
for ii in self.polygons:
pp = np.array(ii) * self.CD # polygon
polygonlen = len(pp)
self.perimeter += np.sum(np.abs(pp[0:-1] - pp[1:polygonlen]))
pp[:, 0] = (pp[:, 0] - self.x_range[0]) / self.x_gridsize
pp[:, 1] = (pp[:, 1] - self.y_range[0]) / self.y_gridsize
vertex_list = list(pp)
polygon = [tuple(y) for y in vertex_list]
ImageDraw.Draw(img).polygon(polygon, outline=1, fill=1)
self.data = np.array(img)
self.data = np.float64(self.data)
self.spat_part = pyfftw.empty_aligned(
(self.y_gridnum, self.x_gridnum), dtype="complex128"
)
self.freq_part = pyfftw.empty_aligned(
(self.y_gridnum, self.x_gridnum), dtype="complex128"
)
self.fft_mask = pyfftw.FFTW(self.spat_part, self.freq_part, axes=(0, 1))
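# How the pyfftw pair above is consumed (a sketch mirroring maskfft() below):
# writing into spat_part and calling the plan fills freq_part with the 2-D FFT,
# which is equivalent to the plain numpy route in maskfftold():
#
#   mask.spat_part[:] = np.fft.ifftshift(mask.data)
#   mask.fft_mask()                           # executes the planned transform
#   fdata = np.fft.fftshift(mask.freq_part)   # == fftshift(fft2(ifftshift(data)))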
def openGDS(
self, gdsdir, layername, boundary=0.16, pixels_per_um=10, with_fft=False
):
with open(gdsdir, "rb") as stream:
lib = Library.load(stream)
a = lib.pop(0)
b = []
xmin = []
xmax = []
ymin = []
ymax = []
for ii in range(0, len(a)):
if a[ii].layer == layername:
# if hasattr(a[ii],'data_type'):
if len(a[ii].xy) > 1:
aa = np.array(a[ii].xy) / 1000 * pixels_per_um
b.append(aa)
xmin.append(min([k for k, v in aa]))
xmax.append(max([k for k, v in aa]))
ymin.append(min([v for k, v in aa]))
ymax.append(max([v for k, v in aa]))
self.polylist = b
xmin = min(xmin)
xmax = max(xmax)
ymin = min(ymin)
ymax = max(ymax)
self.xmin = xmin - boundary * (xmax - xmin)
self.xmax = xmax + boundary * (xmax - xmin)
self.ymin = ymin - boundary * (ymax - ymin)
self.ymax = ymax + boundary * (ymax - ymin)
self.x_range = [self.xmin, self.xmax]
self.y_range = [self.ymin, self.ymax]
self.x_gridnum = int((self.xmax - self.xmin) / self.x_gridsize)
self.y_gridnum = int((self.ymax - self.ymin) / self.y_gridsize)
img = Image.new("L", (self.x_gridnum, self.y_gridnum), 0)
self.perimeter = 0.0
for ii in self.polylist:
pp = np.array(ii) # polygon
polygonlen = len(pp)
self.perimeter += np.sum(np.abs(pp[0:-1] - pp[1:polygonlen]))
pp[:, 0] = (pp[:, 0] - self.xmin) / self.x_gridsize
pp[:, 1] = (pp[:, 1] - self.ymin) / self.y_gridsize
vertex_list = list(pp)
polygon = [tuple(y) for y in vertex_list]
ImageDraw.Draw(img).polygon(polygon, outline=1, fill=1)
self.data = np.array(img)
# Fourier transform pair, pyfftw syntax
self.spat_part = pyfftw.empty_aligned(
(self.y_gridnum, self.x_gridnum), dtype="complex128"
)
self.freq_part = pyfftw.empty_aligned(
(self.y_gridnum, self.x_gridnum), dtype="complex128"
)
self.fft_mask = pyfftw.FFTW(self.spat_part, self.freq_part, axes=(0, 1))
# use the fftw packages
def maskfft(self):
self.spat_part[:] = np.fft.ifftshift(self.data)
self.fft_mask()
self.fdata = np.fft.fftshift(self.freq_part)
def maskfftold(self):
self.fdata = np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(self.data)))
def smooth(self):
xx = | np.linspace(-1, 1, 21) | numpy.linspace |
"""Construction of nodes and weights for integration."""
import functools
import itertools
from typing import Iterable, List, Optional, Tuple
import numpy as np
import scipy.special
import scipy.stats
from ..utilities.basics import Array, Options, StringRepresentation, format_options
class Integration(StringRepresentation):
r"""Configuration for building integration nodes and weights.
Parameters
----------
specification : `str`
How to build nodes and weights. One of the following:
- ``'monte_carlo'`` - Draw from a pseudo-random standard multivariate normal distribution. Integration
weights are ``1 / size``. The ``seed`` field of ``options`` can be used to seed the random number
generator.
- ``'halton'`` - Generate nodes according to the Halton sequence. Different primes (2, 3, 5, etc.) are used for
different dimensions. Integration weights are ``1 / size``. By default, the first ``100`` values in each
dimension are discarded to eliminate correlation between dimensions. The ``discard`` field of ``options``
can be used to increase this number.
- ``'lhs'`` - Generate nodes according to Latin Hypercube Sampling (LHS). Integration weights are
``1 / size``. The ``seed`` field of ``options`` can be used to seed the random number generator.
- ``'mlhs'`` - Generate nodes according to Modified Latin Hypercube Sampling (MLHS) described by
:ref:`references:Hess, Train, and Polak (2004)`. Integration weights are ``1 / size``. The ``seed`` field
of ``options`` can be used to seed the random number generator.
- ``'product'`` - Generate nodes and weights according to the level-``size`` Gauss-Hermite product rule.
- ``'nested_product'`` - Generate nodes and weights according to the level-``size`` nested Gauss-Hermite
product rule. Weights can be negative.
- ``'grid'`` - Generate a sparse grid of nodes and weights according to the level-``size`` Gauss-Hermite
quadrature rule. Weights can be negative.
- ``'nested_grid'`` - Generate a sparse grid of nodes and weights according to the level-``size`` nested
Gauss-Hermite quadrature rule. Weights can be negative.
Best practice for low dimensions is probably to use ``'product'`` to a relatively high degree of polynomial
accuracy. In higher dimensions, ``'grid'`` appears to scale the best. For more information, see
:ref:`references:Judd and Skrainka (2011)` and :ref:`references:Conlon and Gortmaker (2019)`.
Sparse grids are constructed analogously to the Matlab function `nwspgr <http://www.sparse-grids.de/>`_
created by <NAME> and <NAME>. For more information, see
:ref:`references:Heiss and Winschel (2008)`.
size : `int`
The number of draws if ``specification`` is ``'monte_carlo'``, ``'lhs'``, or ``'mlhs'``, and the level of the
quadrature rule otherwise.
specification_options : `dict, optional`
Options for the integration specification. The ``'monte_carlo'``, ``'lhs'``, and ``'mlhs'`` specifications
support the following option:
- **seed** : (`int`) - Passed to :class:`numpy.random.mtrand.RandomState` to seed the random number
generator before building integration nodes. By default, a seed is not passed to the random number
generator.
The ``'halton'`` specification supports the following option:
- **discard** : (`int`) - How many values at the beginning of each dimension's Halton sequence to discard.
Discarding values at the start of each dimension's sequence is the simplest way to eliminate correlation
between dimensions. By default, the first ``100`` values in each dimension are discarded.
Examples
--------
.. raw:: latex
\begin{examplenotebook}
.. toctree::
/_notebooks/api/integration.ipynb
.. raw:: latex
\end{examplenotebook}
"""
_size: int
_seed: Optional[int]
_description: str
_builder: functools.partial
_specification_options: Options
def __init__(self, specification: str, size: int, specification_options: Optional[Options] = None) -> None:
"""Validate the specification and identify the builder."""
specifications = {
'monte_carlo': (functools.partial(monte_carlo), "with Monte Carlo simulation"),
'halton': (functools.partial(halton), "with Halton sequences"),
'lhs': (functools.partial(lhs), "with Latin Hypercube Sampling (LHS)"),
'mlhs': (functools.partial(lhs, modified=True), "with Modified Latin Hypercube Sampling (MLHS)"),
'product': (functools.partial(product_rule), f"according to the level-{size} Gauss-Hermite product rule"),
'grid': (
functools.partial(sparse_grid),
f"in a sparse grid according to the level-{size} Gauss-Hermite rule"
),
'nested_product': (
functools.partial(product_rule, nested=True),
f"according to the level-{size} nested Gauss-Hermite product rule"
),
'nested_grid': (
functools.partial(sparse_grid, nested=True),
f"in a sparse grid according to the level-{size} nested Gauss-Hermite rule"
)
}
# validate the configuration
if specification not in specifications:
raise ValueError(f"specification must be one of {list(specifications.keys())}.")
if not isinstance(size, int) or size < 1:
raise ValueError("size must be a positive integer.")
if specification_options is not None and not isinstance(specification_options, dict):
raise ValueError("specification_options must be None or a dict.")
# initialize class attributes
self._size = size
self._specification = specification
self._builder, self._description = specifications[specification]
# set default options
self._specification_options: Options = {}
if specification == 'halton':
self._specification_options['discard'] = 100
# update and validate options
self._specification_options.update(specification_options or {})
if specification in {'monte_carlo', 'lhs', 'mlhs'}:
if not isinstance(self._specification_options.get('seed', 0), int):
raise ValueError("The specification option seed must be an integer.")
elif specification == 'halton':
discard = self._specification_options['discard']
if not isinstance(discard, int) or discard < 0:
raise ValueError("The specification option discard must be a nonnegative integer.")
def __str__(self) -> str:
"""Format the configuration as a string."""
return (
f"Configured to construct nodes and weights {self._description} with options "
f"{format_options(self._specification_options)}."
)
def _build_many(self, dimensions: int, ids: Iterable) -> Tuple[Array, Array, Array]:
"""Build concatenated IDs, nodes, and weights for each ID."""
builder = self._builder
if self._specification in {'monte_carlo', 'lhs', 'mlhs'}:
builder = functools.partial(builder, state=np.random.RandomState(self._specification_options.get('seed')))
count = 0
ids_list: List[Array] = []
nodes_list: List[Array] = []
weights_list: List[Array] = []
for i in ids:
if self._specification == 'halton':
nodes, weights = builder(dimensions, self._size, start=self._specification_options['discard'] + count)
else:
nodes, weights = builder(dimensions, self._size)
ids_list.append(np.repeat(i, weights.size))
nodes_list.append(nodes)
weights_list.append(weights)
count += weights.size
return np.concatenate(ids_list), np.concatenate(nodes_list), np.concatenate(weights_list)
def _build(self, dimensions: int) -> Tuple[Array, Array]:
"""Build nodes and weights."""
builder = self._builder
if self._specification in {'monte_carlo', 'lhs', 'mlhs'}:
builder = functools.partial(builder, state=np.random.RandomState(self._specification_options.get('seed')))
if self._specification == 'halton':
return builder(dimensions, self._size, start=self._specification_options['discard'])
return builder(dimensions, self._size)
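# Example usage (a sketch; the private _build is called here only for
# illustration, and the seed value is arbitrary):
#
#   integration = Integration('monte_carlo', size=100, specification_options={'seed': 0})
#   nodes, weights = integration._build(dimensions=2)
#   # nodes.shape == (100, 2); weights are uniform and sum to ~1.0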
def monte_carlo(dimensions: int, size: int, state: np.random.RandomState) -> Tuple[Array, Array]:
"""Draw from a pseudo-random standard multivariate normal distribution."""
nodes = state.normal(size=(size, dimensions))
weights = np.repeat(1 / size, size)
return nodes, weights
def halton(dimensions: int, size: int, start: int) -> Tuple[Array, Array]:
"""Generate nodes and weights for integration according to the Halton sequence."""
# generate Halton sequences
sequences = np.zeros((size, dimensions))
for dimension in range(dimensions):
base = get_prime(dimension)
for index in range(size):
value = 0.0
denominator = 1.0
quotient = start + index
while quotient > 0:
quotient, remainder = divmod(quotient, base)
denominator *= base
value += remainder / denominator
sequences[index, dimension] = value
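# Worked example of the radical-inverse loop above for base 2 (start=0):
# index 1 -> 0.5, index 2 -> 0.25, index 3 -> 0.75, index 4 -> 0.125, ...,
# i.e. the binary digits of the index mirrored about the radix point.
# get_prime (defined elsewhere in this module) supplies the base for each
# dimension: 2, 3, 5, 7, ...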
# transform the sequences and construct weights
nodes = scipy.stats.norm().ppf(sequences)
weights = | np.repeat(1 / size, size) | numpy.repeat |
import pytest
import numpy as np
from numpy.testing import assert_almost_equal, assert_warns, assert_raises
from ...sims import linear
from .. import RV
class TestRVStat:
@pytest.mark.parametrize("n", [10, 100, 1000])
@pytest.mark.parametrize("obs_stat", [1.0])
@pytest.mark.parametrize("obs_pvalue", [1 / 1000])
def test_linear_oned(self, n, obs_stat, obs_pvalue):
np.random.seed(123456789)
x, y = linear(n, 1)
stat, pvalue = RV().test(x, y)
assert_almost_equal(stat, obs_stat, decimal=2)
assert_almost_equal(pvalue, obs_pvalue, decimal=2)
class TestRVErrorWarn:
"""Tests errors and warnings derived from MGC."""
def test_error_notndarray(self):
# raises error if x or y is not a ndarray
x = | np.arange(20) | numpy.arange |
import torch
import numpy as np
from torchvision import transforms
from PIL import Image
from utils.resize import ResizeTool
class Rescale:
def __init__(self, output_size):
self.output_size = output_size
def __resize_with_pad(self, image, label):
resized_image, ratio, top, bottom, left, right = ResizeTool.resize_image(image, self.output_size)
label = ResizeTool.adjust_label(label, ratio, top, left)
return resized_image, label
def __call__(self, sample, *args, **kwargs):
image, label = sample["image"], sample["label"]
image = image / 255.0
image, label = self.__resize_with_pad(image, label)
return {
"image": image,
"label": label
}
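# A sketch of composing these transforms with torchvision (the 416x416 output
# size and the sample contents are assumptions, not values from this module):
#
#   from torchvision import transforms
#   pipeline = transforms.Compose([Rescale((416, 416)), ToTensor()])
#   out = pipeline({"image": image_hwc_uint8, "label": label_array})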
class ToTensor:
def __init__(self):
pass
def __call__(self, sample, *args, **kwargs):
image, label = sample["image"], sample["label"]
image_tensor = torch.from_numpy(image)
image_tensor = image_tensor.permute(2, 0, 1)
label_tensor = torch.from_numpy(label)
return {
"image": image_tensor.type(torch.float32),
"label": label_tensor.type(torch.float32)
}
class ColorTransform:
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
self.color_jitter = transforms.ColorJitter(brightness, contrast, saturation, hue)
def __call__(self, sample, *args, **kwargs):
image, label = sample["image"], sample["label"]
image = Image.fromarray(image)
image = self.color_jitter(image)
image = | np.array(image) | numpy.array |
import tvm
from tvm import relay
from tvm import hago
import numpy as np
from common_utils import target_and_ctx
def create_hardware():
hardware = hago.Hardware()
hardware.add_op_desc('concatenate', hago.OpDesc(in_dtypes='float32', out_dtypes='float32'))
hardware.add_op_desc('concatenate', hago.OpDesc(in_dtypes='int8', out_dtypes='int8'))
hardware.add_op_desc('concatenate', hago.OpDesc(in_dtypes='int32', out_dtypes='int32'))
hardware.add_op_desc('add', hago.OpDesc(in_dtypes='int8', out_dtypes='int32'))
hardware.add_op_desc('nn.dense', hago.OpDesc(in_dtypes='int8', out_dtypes='int32'))
hardware.add_op_desc('nn.conv2d', hago.OpDesc(in_dtypes='int8', out_dtypes='int32'))
return hardware
def test_dense(ishape=(8, 16), wshape=(10, 16), batch_num=5, device='cpu'):
target, ctx = target_and_ctx(device)
data = relay.var('data', shape=ishape)
weight = relay.var('weight', shape=wshape)
out = relay.nn.dense(data, weight)
func = relay.Function([data, weight], out)
# weight_np = np.random.rand(*wshape).astype('float32')
weight_np = np.random.normal(size=wshape).astype('float32')
# generate dataset
batches = []
for i in range(batch_num):
data_np = np.random.rand(*ishape).astype('float32')
ex = relay.create_executor("debug", ctx=ctx, target=target)
out_np = ex.evaluate(func)(data_np, weight_np).asnumpy()
pred_np = np.argmax(out_np, axis=1)
batches.append({'data': tvm.nd.array(data_np), 'label': tvm.nd.array(pred_np)})
dataset = hago.CalibrationDataset(batches)
params = {'weight': tvm.nd.array(weight_np)}
return func, params, dataset
def test_concatenate(ishape=(8, 16), wshape=(10, 16), batch_num=3, device='cpu'):
target, ctx = target_and_ctx(device)
w0shape = wshape
w1shape = (wshape[1], wshape[0])
data_a = relay.var('data_a', shape=ishape)
data_b = relay.var('data_b', shape=ishape)
data_c = relay.var('data_c', shape=ishape)
data_d = relay.var('data_d', shape=ishape)
weight0 = relay.var('weight0', shape=w0shape)
weight1 = relay.var('weight1', shape=w0shape)
weight2 = relay.var('weight2', shape=w0shape)
weight3 = relay.var('weight3', shape=w0shape)
dense_a = relay.nn.dense(data_a, weight0)
dense_b = relay.nn.dense(data_b, weight1)
dense_c = relay.nn.dense(data_c, weight2)
dense_d = relay.nn.dense(data_d, weight3)
concat = relay.concatenate([dense_a, dense_b, dense_c, dense_d], axis=0)
# 32, 10
weight5 = relay.var('weight5', shape=w1shape)
out = relay.nn.dense(concat, weight5)
func = relay.Function([data_a, data_b, data_c, data_d, weight0, weight1, weight2, weight3, weight5], out)
weight0_np = np.random.normal(size=w0shape).astype('float32')
weight1_np = np.random.normal(size=w0shape).astype('float32')
weight2_np = np.random.normal(size=w0shape).astype('float32')
weight3_np = np.random.normal(size=w0shape).astype('float32')
weight5_np = np.random.normal(size=w1shape).astype('float32')
# generate dataset
batches = []
for i in range(batch_num):
data_a_np = np.random.rand(*ishape).astype('float32')
data_b_np = np.random.rand(*ishape).astype('float32')
data_c_np = np.random.rand(*ishape).astype('float32')
data_d_np = | np.random.rand(*ishape) | numpy.random.rand |
import time
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
def fft_window(tnum, nfft, window, overlap):
# IN : full length of time series, nfft, window name, overlap ratio
# OUT : bins, 1 x nfft window function
# use overlapping
bins = int(np.fix((int(tnum/nfft) - overlap)/(1.0 - overlap)))
# window function
if window == 'rectwin': # overlap = 0.5
win = np.ones(nfft)
elif window == 'hann': # overlap = 0.5
win = np.hanning(nfft)
elif window == 'hamm': # overlap = 0.5
win = np.hamming(nfft)
elif window == 'kaiser': # overlap = 0.62
win = np.kaiser(nfft, beta=30)
elif window == 'HFT248D': # overlap = 0.84
z = 2*np.pi/nfft*np.arange(0,nfft)
win = 1 - 1.985844164102*np.cos(z) + 1.791176438506*np.cos(2*z) - 1.282075284005*np.cos(3*z) + \
0.667777530266*np.cos(4*z) - 0.240160796576*np.cos(5*z) + 0.056656381764*np.cos(6*z) - \
0.008134974479*np.cos(7*z) + 0.000624544650*np.cos(8*z) - 0.000019808998*np.cos(9*z) + \
0.000000132974*np.cos(10*z)
return bins, win
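# Worked example: tnum=10000, nfft=512, overlap=0.5 gives
# bins = fix((fix(10000/512) - 0.5)/(1 - 0.5)) = fix((19 - 0.5)/0.5) = 37
# overlapping bins, each nfft samples long and shifted by nfft/2.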
def fftbins(x, dt, nfft, window, overlap, detrend=0, full=0):
# IN : 1 x tnum data
# OUT : bins x faxis fftdata
tnum = len(x)
bins, win = fft_window(tnum, nfft, window, overlap)
win_factor = np.mean(win**2) # window factors
# make an x-axis #
ax = np.fft.fftfreq(nfft, d=dt) # full 0~fN -fN~-f1
if np.mod(nfft, 2) == 0: # even nfft
ax = np.hstack([ax[0:int(nfft/2)], -(ax[int(nfft/2)]), ax[int(nfft/2):nfft]])
if full == 1: # full shift to -fN ~ 0 ~ fN
ax = np.fft.fftshift(ax)
else: # half 0~fN
ax = ax[0:int(nfft/2+1)]
# make fftdata
if full == 1: # full shift to -fN ~ 0 ~ fN
if np.mod(nfft, 2) == 0: # even nfft
fftdata = np.zeros((bins, nfft+1), dtype=np.complex_)
else: # odd nfft
fftdata = np.zeros((bins, nfft), dtype=np.complex_)
else: # half 0 ~ fN
fftdata = np.zeros((bins, int(nfft/2+1)), dtype=np.complex_)
for b in range(bins):
idx1 = int(b*np.fix(nfft*(1 - overlap)))
idx2 = idx1 + nfft
sx = x[idx1:idx2]
if detrend == 0:
sx = signal.detrend(sx, type='constant') # subtract mean
elif detrend == 1:
sx = signal.detrend(sx, type='linear')
sx = sx * win # apply window function
# get fft
SX = np.fft.fft(sx, n=nfft)/nfft # divide by the length
if np.mod(nfft, 2) == 0: # even nfft
SX = np.hstack([SX[0:int(nfft/2)], np.conj(SX[int(nfft/2)]), SX[int(nfft/2):nfft]])
if full == 1: # shift to -fN ~ 0 ~ fN
SX = np.fft.fftshift(SX)
else: # half 0 ~ fN
SX = SX[0:int(nfft/2+1)]
fftdata[b,:] = SX
return ax, fftdata, win_factor
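def nextpow2(n):
    # nextpow2 is called by cwt() below but is not defined in this excerpt; this
    # minimal sketch assumes the intended behavior is the smallest power of two
    # >= n, returned as an integer FFT length.
    return int(2 ** np.ceil(np.log2(n)))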
def cwt(x, dt, df, detrend=0, full=1):
# detrend signal
if detrend == 0:
x = signal.detrend(x, type='constant') # subtract mean
elif detrend == 1:
x = signal.detrend(x, type='linear')
# make a t-axis
tnum = len(x)
nfft = nextpow2(tnum) # power of 2
t = np.arange(nfft)*dt
# make a f-axis with constant df
s0 = 2.0*dt # the smallest scale
ax = np.arange(0.0, 1.0/(1.03*s0), df) # 1.03 for the Morlet wavelet function
# scales
old_settings = np.seterr(divide='ignore')
sj = 1.0/(1.03*ax)
np.seterr(**old_settings)
dj = np.log2(sj/s0) / np.arange(len(sj)) # dj; necessary for reconstruction
sj[0] = tnum*dt/2.0
dj[0] = 0 # remove infinity point due to fmin = 0
# Morlet wavelet function (unnormalized)
omega0 = 6.0 # nondimensional wavelet frequency
wf0 = lambda eta: np.pi**(-1.0/4) * np.exp(1.0j*omega0*eta) * np.exp(-1.0/2*eta**2)
ts = np.sqrt(2)*sj # e-folding time for Morlet wavelet with omega0 = 6; significance level
# FFT of signal
X = np.fft.fft(x, n=nfft)/nfft
# calculate CWT
snum = len(sj)
cwtdata = np.zeros((nfft, snum), dtype=np.complex_)
for j, s in enumerate(sj):
# nondimensional time axis at time scale s
eta = t/s
# FFT of the normalized wavelet function
W = np.fft.fft( np.conj( wf0(eta - np.mean(eta))*np.sqrt(dt/s) ) )
# Wavelet transform at scale s for all n times
cwtdata[:,j] = np.conj(np.fft.fftshift(np.fft.ifft(X * W) * nfft)) # phase direction correct
# full size
if full == 1:
cwtdata = np.hstack([np.fliplr(np.conj(cwtdata)), cwtdata[:,1:]]) # real x only
ax = np.hstack([-ax[::-1], ax[1:]])
return ax, cwtdata[0:tnum,:], dj, ts
def cross_power(XX, YY, win_factor):
Pxy = np.mean(XX * np.conjugate(YY), 0)
Pxy = np.abs(Pxy).real / win_factor
return Pxy
def coherence(XX, YY):
# normalization outside loop
# Pxy = np.mean(XX * np.conjugate(YY), 0)
# Pxx = np.mean(XX * np.conjugate(XX), 0).real
# Pyy = np.mean(YY * np.conjugate(YY), 0).real
# Gxy = np.abs(Pxy).real / np.sqrt(Pxx * Pyy)
# normalization inside loop
bins = XX.shape[0]
val = np.zeros(XX.shape, dtype=np.complex_)
for i in range(bins):
X = XX[i,:]
Y = YY[i,:]
Pxx = X * np.matrix.conjugate(X)
Pyy = Y * np.matrix.conjugate(Y)
val[i,:] = X*np.matrix.conjugate(Y) / np.sqrt(Pxx*Pyy)
# average over bins
Gxy = np.mean(val, 0)
Gxy = np.abs(Gxy).real
return Gxy
def cross_phase(XX, YY):
Pxy = np.mean(XX * np.conjugate(YY), 0)
Axy = np.arctan2(Pxy.imag, Pxy.real).real
return Axy
def correlation(XX, YY, win_factor):
bins = XX.shape[0]
nfreq = XX.shape[1]
val = np.zeros(XX.shape, dtype=np.complex_)
for b in range(bins):
X = XX[b,:]
Y = YY[b,:]
val[b,:] = np.fft.ifftshift(X*np.matrix.conjugate(Y) / win_factor)
val[b,:] = np.fft.ifft(val[b,:], n=nfreq)*nfreq
val[b,:] = np.fft.fftshift(val[b,:])
val[b,:] = np.flip(val[b,:], axis=0)
# average over bins; return real value
Cxy = np.mean(val, 0)
return Cxy.real
def corr_coef(XX, YY):
bins = XX.shape[0]
nfreq = XX.shape[1]
val = np.zeros(XX.shape, dtype=np.complex_)
for b in range(bins):
X = XX[b,:]
Y = YY[b,:]
x = np.fft.ifft(np.fft.ifftshift(X), n=nfreq)*nfreq
Rxx = np.mean(x**2)
y = np.fft.ifft(np.fft.ifftshift(Y), n=nfreq)*nfreq
Ryy = np.mean(y**2)
val[b,:] = np.fft.ifftshift(X*np.matrix.conjugate(Y))
val[b,:] = np.fft.ifft(val[b,:], n=nfreq)*nfreq
val[b,:] = np.fft.fftshift(val[b,:])
val[b,:] = np.flip(val[b,:], axis=0)/np.sqrt(Rxx*Ryy)
# average over bins; return real value
cxy = np.mean(val, 0)
return cxy.real
def xspec(XX, YY, win_factor):
val = np.abs(XX * np.conjugate(YY)).real
return val
def bicoherence(XX, YY):
# ax1 = self.Dlist[dtwo].ax # full -fN ~ fN
# ax2 = np.fft.ifftshift(self.Dlist[dtwo].ax) # full 0 ~ fN, -fN ~ -f1
# ax2 = ax2[0:int(nfft/2+1)] # half 0 ~ fN
bins = XX.shape[0]
full = XX.shape[1]
half = int(full/2+1) # half length
# calculate bicoherence
B = np.zeros((full, half), dtype=np.complex_)
P12 = np.zeros((full, half))
P3 = np.zeros((full, half))
val = np.zeros((full, half))
for b in range(bins):
X = XX[b,:] # full -fN ~ fN
Y = YY[b,:] # full -fN ~ fN
Xhalf = np.fft.ifftshift(X) # full 0 ~ fN, -fN ~ -f1
Xhalf = Xhalf[0:half] # half 0 ~ fN
X1 = np.transpose(np.tile(X, (half, 1)))
X2 = np.tile(Xhalf, (full, 1))
X3 = np.zeros((full, half), dtype=np.complex_)
for j in range(half):
if j == 0:
X3[0:, j] = Y[j:]
else:
X3[0:(-j), j] = Y[j:]
B = B + X1 * X2 * np.matrix.conjugate(X3) # complex bin average
P12 = P12 + (np.abs(X1 * X2).real)**2 # real average
P3 = P3 + (np.abs(X3).real)**2 # real average
# val = np.log10(np.abs(B)**2) # bispectrum
old_settings = np.seterr(invalid='ignore')
val = (np.abs(B)**2) / P12 / P3 # bicoherence
np.seterr(**old_settings)
# summation over pairs
sum_val = np.zeros(full)
for i in range(half):
if i == 0:
sum_val = sum_val + val[:,i]
else:
sum_val[i:] = sum_val[i:] + val[:-i,i]
N = np.array([i+1 for i in range(half)] + [half for i in range(full-half)])
sum_val = sum_val / N # element wise division
return val, sum_val
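def get_kidx(full):
    # get_kidx is called by ritz_nonlinear() and wit_nonlinear() below but is not
    # defined in this excerpt. This is a minimal sketch of the assumed contract:
    # for each index k on the fftshifted frequency axis of length `full` (zero
    # frequency at index full//2), return all index pairs (i, j) whose
    # frequencies sum to frequency k, i.e. (i - half) + (j - half) == (k - half).
    half = full // 2
    kidx = []
    for k in range(full):
        kidx.append([(i, j) for i in range(full) for j in range(full)
                     if i + j - half == k])
    return kidx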
def ritz_nonlinear(XX, YY):
bins = XX.shape[0]
full = XX.shape[1]
kidx = get_kidx(full)
Aijk = np.zeros((full, full), dtype=np.complex_) # Xo1 Xo2 cXo
cAijk = np.zeros((full, full), dtype=np.complex_) # cXo1 cXo2 Xo
Bijk = np.zeros((full, full), dtype=np.complex_) # Yo cXo1 cXo2
Aij = np.zeros((full, full)) # |Xo1 Xo2|^2
Ak = np.zeros(full) # Xo cXo
Bk = np.zeros(full, dtype=np.complex_) # Yo cXo
for b in range(bins):
X = XX[b,:] # full -fN ~ fN
Y = YY[b,:] # full -fN ~ fN
# make Xi and Xj
Xi = np.transpose(np.tile(X, (full, 1))) # columns of (-fN ~ fN)
Xj = np.tile(X, (full, 1)) # rows of (-fN ~ fN)
# make Xk and Yk
Xk = np.zeros((full, full), dtype=np.complex_)
Yk = np.zeros((full, full), dtype=np.complex_)
for k in range(full):
idx = kidx[k]
for n, ij in enumerate(idx):
Xk[ij] = X[k]
Yk[ij] = Y[k]
# do ensemble average
Aijk = Aijk + Xi * Xj * np.matrix.conjugate(Xk) / bins
cAijk = cAijk + np.matrix.conjugate(Xi) * np.matrix.conjugate(Xj) * Xk / bins
Bijk = Bijk + np.matrix.conjugate(Xi) * np.matrix.conjugate(Xj) * Yk / bins
Aij = Aij + (np.abs(Xi * Xj).real)**2 / bins
Ak = Ak + (np.abs(X).real)**2 / bins
Bk = Bk + Y * np.matrix.conjugate(X) / bins
# Linear transfer function ~ growth rate
Lk = np.zeros(full, dtype=np.complex_)
bsum = np.zeros(full, dtype=np.complex_)
asum = np.zeros(full)
for k in range(full):
idx = kidx[k]
for n, ij in enumerate(idx):
bsum[k] = bsum[k] + Aijk[ij] * Bijk[ij] / Aij[ij]
asum[k] = asum[k] + (np.abs(Aijk[ij]).real)**2 / Aij[ij]
Lk = (Bk - bsum) / (Ak - asum)
# Quadratic transfer function ~ nonlinear energy transfer rate
Lkk = np.zeros((full, full), dtype=np.complex_)
for k in range(full):
idx = kidx[k]
for n, ij in enumerate(idx):
Lkk[ij] = Lk[k]
Qijk = (Bijk - Lkk * cAijk) / Aij
return Lk, Qijk, Bk, Aijk
def wit_nonlinear(XX, YY):
bins = XX.shape[0]
full = XX.shape[1]
kidx = get_kidx(full)
Lk = np.zeros(full, dtype=np.complex_) # Linear
Qijk = np.zeros((full, full), dtype=np.complex_) # Quadratic
print('For stable calculations, bins ({0}) >> full/2 ({1})'.format(bins, full/2))
for k in range(full):
idx = kidx[k]
# construct equations for each k
U = np.zeros((bins, len(idx)+1), dtype=np.complex_) # N (number of ensembles) x P (number of pairs + 1)
V = | np.zeros(bins, dtype=np.complex_) | numpy.zeros |
#!/usr/bin/env python3
import os
import cv2
import numpy as np
import pandas as pd
import tensorflow as tf
from tqdm import tqdm
from glob2 import glob
from skimage import transform
from skimage.io import imread, imsave
from sklearn.preprocessing import normalize
from sklearn.metrics import roc_curve, auc
from tensorflow.keras.preprocessing.image import ImageDataGenerator
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
class Eval_folder:
def __init__(self, model_interf, data_path, batch_size=128, save_embeddings=None):
if isinstance(model_interf, str) and model_interf.endswith("h5"):
model = tf.keras.models.load_model(model_interf)
self.model_interf = lambda imms: model((imms - 127.5) * 0.0078125).numpy()
else:
self.model_interf = model_interf
self.dist_func = lambda aa, bb: np.dot(aa, bb)
self.embs, self.imm_classes, self.filenames = self.prepare_images_and_embeddings(data_path, batch_size, save_embeddings)
self.data_path = data_path
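# Example construction (a sketch; the paths are placeholders). Because the
# embeddings are L2-normalized, the dot-product dist_func above amounts to
# cosine similarity.
#
#   ev = Eval_folder("model.h5", "/data/eval_images", batch_size=128,
#                    save_embeddings="embeddings_backup.npz")
#   ev.do_evaluation()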
def prepare_images_and_embeddings(self, data_path, batch_size=128, save_embeddings=None):
if save_embeddings and os.path.exists(save_embeddings):
print(">>>> Reloading from backup:", save_embeddings)
aa = np.load(save_embeddings)
embs, imm_classes, filenames = aa["embs"], aa["imm_classes"], aa["filenames"]
embs, imm_classes = embs.astype("float32"), imm_classes.astype("int")
else:
img_shape = (112, 112)
img_gen = ImageDataGenerator().flow_from_directory(data_path, class_mode="binary", target_size=img_shape, batch_size=batch_size, shuffle=False)
steps = int(np.ceil(img_gen.classes.shape[0] / img_gen.batch_size))
filenames = np.array(img_gen.filenames)
embs, imm_classes = [], []
for _ in tqdm(range(steps), "Embedding"):
imm, imm_class = img_gen.next()
emb = self.model_interf(imm)
embs.extend(emb)
imm_classes.extend(imm_class)
embs, imm_classes = normalize(np.array(embs).astype("float32")), np.array(imm_classes).astype("int")
if save_embeddings:
print(">>>> Saving embeddings to:", save_embeddings)
np.savez(save_embeddings, embs=embs, imm_classes=imm_classes, filenames=filenames)
return embs, imm_classes, filenames
def do_evaluation(self):
register_ids = np.unique(self.imm_classes)
print(">>>> [base info] embs:", self.embs.shape, "imm_classes:", self.imm_classes.shape, "register_ids:", register_ids.shape)
register_base_embs = np.array([]).reshape(0, self.embs.shape[-1])
register_base_dists = []
for register_id in tqdm(register_ids, "Evaluating"):
pos_pick_cond = self.imm_classes == register_id
pos_embs = self.embs[pos_pick_cond]
register_base_emb = normalize([np.sum(pos_embs, 0)])[0]
register_base_dist = self.dist_func(self.embs, register_base_emb)
register_base_dists.append(register_base_dist)
register_base_embs = np.vstack([register_base_embs, register_base_emb])
register_base_dists = np.array(register_base_dists).T
accuracy = (register_base_dists.argmax(1) == self.imm_classes).sum() / register_base_dists.shape[0]
reg_pos_cond = np.equal(register_ids, np.expand_dims(self.imm_classes, 1))
reg_pos_dists = register_base_dists[reg_pos_cond].ravel()
reg_neg_dists = register_base_dists[np.logical_not(reg_pos_cond)].ravel()
label = np.concatenate([np.ones_like(reg_pos_dists), np.zeros_like(reg_neg_dists)])
score = | np.concatenate([reg_pos_dists, reg_neg_dists]) | numpy.concatenate |
import torch
import torch.nn as nn
import numpy as np
from cnnseq import utils
from cnnseq.utils_models import set_optimizer, flatten_audio
from skimage.io import imsave
import json
import os
# Recurrent neural network (many-to-one)
class Seq2Seq(nn.Module):
def __init__(self, params, device=torch.device('cpu')):
super(Seq2Seq, self).__init__()
self.params = params
input_size = self.params['input_size']
output_size = self.params['output_size']
hidden_size = self.params['hidden_size']
num_layers = self.params['num_layers']
activation_function = self.params['activation_function']
self.model_type = self.params['model_type']
self.device = device
self.hidden_size = self.params['hidden_size']
self.num_layers = self.params['num_layers']
if self.model_type == 'seq2seq':
self.lstm_enc = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
self.lstm_dec = nn.LSTM(output_size, hidden_size, num_layers, batch_first=True)
self.decoder_linear = nn.Linear(hidden_size, output_size)
elif self.model_type == 'seq2seq_gru':
self.lstm_enc = nn.GRU(input_size, hidden_size, num_layers=num_layers, batch_first=True)
self.lstm_dec = nn.GRU(output_size, hidden_size, num_layers=num_layers, batch_first=True)
self.decoder_linear = nn.Linear(hidden_size, output_size)
else:
self.lstm_enc = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
self.decoder_linear = nn.Linear(hidden_size, input_size)
#self.lstm_enc = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
#if self.model_type == 'seq2seq':
# self.lstm_dec = nn.LSTM(output_size, hidden_size, num_layers, batch_first=True)
# self.decoder_linear = nn.Linear(hidden_size, output_size)
#else:
# self.decoder_linear = nn.Linear(hidden_size, input_size)
# self.softmax = nn.LogSoftmax(dim=1)
if activation_function == 'softmax':
self.softmax = nn.Softmax(dim=num_layers)
elif activation_function == 'sigmoid':
self.softmax = nn.Sigmoid()
elif activation_function == 'tanh':
self.softmax = nn.Tanh()
elif activation_function =='relu':
self.softmax = nn.ReLU()
else:
self.softmax = nn.Softplus()
def forward(self, x, y):
# print("x: {}".format(np.shape(x)))
# Set initial hidden and cell states
h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(self.device)
c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(self.device)
# Encoder: Forward propagate LSTM
if self.model_type == 'seq2seq' or self.model_type == 'lstm':
out, hidden = self.lstm_enc(x, (h0, c0)) # out: tensor of shape (batch_size, seq_length, hidden_size)
# print("Enc shapes: out {}, hidden: {}, h0: {}, c0: {}".format(np.shape(out), np.shape(hidden), np.shape(h0), np.shape(c0)))
else: # seq2seq_gru
out, hidden = self.lstm_enc(x, h0)
# Decoder: Decode the hidden state of the last time step
if 'seq2seq' in self.model_type:
out, _ = self.lstm_dec(y, hidden)
# print("Decoder: {}".format(np.shape(out)))
out = self.softmax(self.decoder_linear(out))
# print("Out: {}".format(np.shape(out)))
return out, hidden
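# Shape sketch (sizes borrowed from the comment in train() below; the hidden
# size is an arbitrary assumption): with batch_first=True the encoder consumes
# x of shape (batch, src_len, input_size) and the decoder consumes y of shape
# (batch, tgt_len, output_size), returning (batch, tgt_len, output_size).
#
#   params = {'input_size': 90, 'output_size': 8, 'hidden_size': 128,
#             'num_layers': 2, 'activation_function': 'tanh',
#             'model_type': 'seq2seq'}
#   model = Seq2Seq(params)
#   out, hidden = model(torch.rand(2, 96, 90), torch.rand(2, 6000, 8))
#   # out.shape -> torch.Size([2, 6000, 8])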
def train(model, dataloader_train, dataloader_label_train, args, device):
# Loss and optimizer
criterion = nn.L1Loss() # SmoothL1Loss, NLLLoss(), CrossEntropyLoss()
optimizer, partial_name = set_optimizer(model, args)
# New results dir based on model's parameters
res_dir = args.results_dir + '{}_trainSize_{}_testSize_{}/'.format(partial_name, args.train_samples_size,
args.test_samples_size)
args.results_dir = res_dir
utils.ensure_dir(res_dir)
# print("res_dir: {}".format(res_dir))
log_file = open(res_dir + 'log.txt', 'w')
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=args.scheduler_factor,
verbose=True)
total_step = len(dataloader_train.keys())
loss_arr = []
epoch_arr = []
for epoch in range(args.num_epochs):
for i, (im, la) in enumerate(zip(dataloader_train.keys(), dataloader_label_train.keys())):
labels = dataloader_label_train[la]
images = dataloader_train[im]
# print("Shape images: {}, labels: {}".format(np.shape(images), np.shape(labels)))
images = images.reshape(-1, np.shape(images)[-1], args.input_size).to(device) # bsx28x28
labels = labels.reshape(-1, np.shape(labels)[-1], args.output_size).to(device) # labels.to(device)
# print("Shape after reshape images: {}, labels: {}".format(np.shape(images), np.shape(labels)))
# Forward pass
target = labels # images # 1-images
outputs, _ = model(images, target) # (2, 96, 90), (2, 6000, 8)
loss = criterion(outputs, target)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i + 1) % 50 == 0:
log_str = 'Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch + 1, args.num_epochs,
i + 1, total_step, loss.item())
print(log_str)
log_file.write(log_str + '\n')
scheduler.step(loss)
loss_arr.append(loss.item())
epoch_arr.append(epoch + 1)
if (epoch + 1) % args.save_step == 0:
# Input images
input_data = images.cpu().data.numpy()[0]
input_reshaped = np.reshape(input_data, [np.shape(input_data)[1], np.shape(input_data)[0]])
# Target audio
images_white_data = target.cpu().data.numpy()[0]
im_reshaped = np.reshape(images_white_data,
[np.shape(images_white_data)[1], np.shape(images_white_data)[0]])
im_reshaped = flatten_audio(im_reshaped, args)
# Generated audio
outputs_data = outputs.cpu().data.numpy()[0]
out_reshaped = np.reshape(outputs_data, [np.shape(outputs_data)[1], np.shape(outputs_data)[0]])
out_reshaped = flatten_audio(out_reshaped, args)
# Save audio, 16KHz
from scipy.io.wavfile import write
scaled = -1.0 + (1.0 - (-1.0)) * (input_reshaped - np.min(input_reshaped)) / (
np.max(input_reshaped) - np.min(input_reshaped))
imsave('{}{}_input.jpg'.format(res_dir, epoch + 1), scaled)
scaled = -1.0 + (1.0 - (-1.0)) * (im_reshaped - np.min(im_reshaped)) / (
np.max(im_reshaped) - np.min(im_reshaped))
scaled = np.int16(scaled / np.max(np.abs(scaled)) * 32767)
write('{}{}_target.wav'.format(res_dir, epoch + 1), 16000, scaled[0])
scaled2 = np.int16(out_reshaped / np.max(np.abs(out_reshaped)) * 32767)
write('{}{}_gen.wav'.format(res_dir, epoch + 1), 16000, scaled2[0])
# imsave('{}{}_target.jpg'.format(res_dir, epoch + 1), images_white_data[0])
# imsave('{}{}_gen.jpg'.format(res_dir, epoch + 1), outputs_data[0])
# Save the model checkpoint
torch.save(model.state_dict(), res_dir + args.saved_model)
# Plot loss_epochs.svg file
import matplotlib.pyplot as plt
plt.figure(figsize=[6, 6])
plt.plot(epoch_arr, loss_arr, '*-')
plt.title('Training loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.grid('on')
# plt.gca().set_position([0, 0, 1, 1])
plt.savefig("{}loss_epochs.svg".format(res_dir))
plt.cla()
# Save args in json file so model can be fully loaded independently
with open(os.path.join(res_dir, args.saved_model_parameters), 'w') as fp:
json.dump(vars(args), fp, sort_keys=True, indent=4)
log_file.close()
return res_dir
def test(dataloader_test, dataloader_label_test, args, device):
# Test the model
print("\nTesting model")
# Load model parameters from JSON file
with open(os.path.join(args.results_dir, args.saved_model_parameters), 'r') as fp:
print(os.path.join(args.results_dir, args.saved_model_parameters))
params = json.load(fp)
# model = CNNSeq2Seq(args.input_size, args.output_size, args.hidden_size, args.num_layers).to(device)
model = Seq2Seq(params, device).to(device)
model_path = args.results_dir + args.saved_model
print(model_path)
model.load_state_dict(torch.load(model_path))
with torch.no_grad():
correct = 0
total = 0
for im, la in zip(dataloader_test.keys(), dataloader_label_test.keys()):
labels = dataloader_label_test[la]
images = dataloader_test[im]
images = images.reshape(-1, np.shape(images)[-1], params['input_size']).to(device)
labels = labels.reshape(-1, np.shape(labels)[-1], params['output_size']).to(device)
target = labels # 1 - images
outputs, _ = model(images, target)
input_data_tmp = images.cpu().data.numpy()
images_white_data_tmp = target.cpu().data.numpy()
outputs_data_tmp = outputs.cpu().data.numpy()
print(np.size(images_white_data_tmp))
for i in range(0, min(2, params['batch_size'])):
input_data = input_data_tmp[i]
images_white_data = images_white_data_tmp[i]
outputs_data = outputs_data_tmp[i]
im_reshaped = np.reshape(images_white_data,
[np.shape(images_white_data)[1], np.shape(images_white_data)[0]])
im_reshaped = flatten_audio(im_reshaped, args)
input_reshaped = np.reshape(input_data, [np.shape(input_data)[1], np.shape(input_data)[0]])
out_reshaped = np.reshape(outputs_data, [np.shape(outputs_data)[1], np.shape(outputs_data)[0]])
out_reshaped = flatten_audio(out_reshaped, args)
print(np.shape(out_reshaped))
# Save audio, 16KHz
from scipy.io.wavfile import write
scaled = -1.0 + (1.0 - (-1.0)) * (input_reshaped - np.min(input_reshaped)) / (
np.max(input_reshaped) - np.min(input_reshaped))
imsave('{}test_input.jpg'.format(args.results_dir), scaled)
scaled = -1.0 + (1.0 - (-1.0)) * (im_reshaped - np.min(im_reshaped)) / (
| np.max(im_reshaped) | numpy.max |
import scipy as sp
import numpy as np
from scipy.stats import lognorm as dist
from ngboost.distns import SurvivalDistn
from ngboost.scores import LogScore, CRPScore
class LogNormalLogScore(LogScore):
def score(self, Y):
E = Y['Event']
T = Y['Time']
cens = (1-E) * np.log(1 - self.dist.cdf(T) + self.eps)
uncens = E * self.dist.logpdf(T)
return -(cens + uncens)
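# The score above is the negative right-censored log-likelihood,
#   -log L_i = -[ E_i * log f(T_i) + (1 - E_i) * log(1 - F(T_i)) ],
# where f and F are the lognormal pdf and cdf; eps guards against log(0) for
# times in the distribution's upper tail.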
def d_score(self, Y):
E = Y['Event'][:,np.newaxis]
T = Y['Time']
lT = np.log(T)
Z = (lT - self.loc) / self.scale
D_uncens = np.zeros((self.loc.shape[0], 2))
D_uncens[:, 0] = (self.loc - lT) / (self.scale ** 2)
D_uncens[:, 1] = 1 - ((self.loc - lT) ** 2) / (self.scale ** 2)
D_cens = np.zeros((self.loc.shape[0], 2))
D_cens[:, 0] = -sp.stats.norm.pdf(lT, loc=self.loc, scale=self.scale) / \
(1 - self.dist.cdf(T) + self.eps)
D_cens[:, 1] = -Z * sp.stats.norm.pdf(lT, loc=self.loc, scale=self.scale) / \
(1 - self.dist.cdf(T) + self.eps)
return (1-E) * D_cens + E * D_uncens
def metric(self):
FI = | np.zeros((self.loc.shape[0], 2, 2)) | numpy.zeros |
import numpy as np
import re
from commands.AbstractCommand import AbstractCommand
class roll(AbstractCommand):
type = str()
def __init__(self, command_specific_setup):
self.type = command_specific_setup
def execute(self, command_input):
diceInput = command_input.partition("d") #returns a tuple with index 0 being the x, index 1 being d, index 2 being the n
if not re.search(r'^\d*?d\d+$',command_input):
return "the vibes are simply rancid"
n = int(diceInput[2])
x = int(diceInput[0]) if diceInput[0] else 1  # the regex allows a bare "dN", which means a single die
if self.type == 'ad':
return(xdn_ad(x,n))
elif self.type == 'da':
return(xdn_da(x,n))
else:
return (xdn(x,n))
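# Example (a sketch): roll(None).execute("3d6") parses x=3, n=6 and returns
# xdn(3, 6). A setup value of 'ad' or 'da' dispatches to the advantage /
# disadvantage variants xdn_ad / xdn_da, which lie outside this excerpt.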
def dn(n): #a die with n sides
return np.random.randint(1,n+1,1)
def xdn(x, n): #x dice with n sides
if (x < 1) or (n < 1):
return("numprob")
dice = | np.random.randint(1,n+1,x) | numpy.random.randint |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for horizon_util."""
from . import horizon_util
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
class HorizonUtilTest(parameterized.TestCase):
def test_int_frame_horizons(self):
radius = np.array([0.1, 0.3, 1, 3])
actual = horizon_util.int_frame_horizon(radius, num_frames=10, time_scale=5)
expected = [0, 1, 5, 9]
np.testing.assert_equal(actual, expected)
self.assertTrue(np.issubdtype(actual.dtype, np.integer))
def test_int_frame_horizons_infinite(self):
num_frames = 10
radius = np.array([-np.inf, 0, np.inf])
actual = horizon_util.int_frame_horizon(radius, num_frames, time_scale=1)
expected = [0, 0, num_frames - 1]
| np.testing.assert_equal(actual, expected) | numpy.testing.assert_equal |
import glob
import numpy as np
from tqdm import tqdm
def opticflow(x1, y1, x2, y2):
# calculate the optic flow between 2 points
# return dx dy
# checked
dx = x2 - x1
dy = y2 - y1
return dx, dy
def get_opticflow(data_before, data, model, kp_num, people_num = 0):
# one person
# get dx, dy from 2 frames
# checked
if data[model][people_num, kp_num, 2] != 0 and data_before[model][people_num, kp_num, 2] != 0:
# if the confidence score is not 0
x2 = data[model][people_num, kp_num, 0]
y2 = data[model][people_num, kp_num, 1]
x1 = data_before[model][people_num, kp_num, 0]
y1 = data_before[model][people_num, kp_num, 1]
dx, dy = opticflow(x1, y1, x2, y2)
# out = [dx, dy]
else:
# out = None
dx = dy = 0
return dx, dy
def get_coordinate(data, model, kp_num, people_num = 0):
# one person
# get coordinate from one frame
# checked
if data[model][people_num, kp_num, 2] != 0:
x = data[model][people_num, kp_num, 0]
y = data[model][people_num, kp_num, 1]
# out = [x, y]
else:
# out = None
x = y = None
return x, y
def load_FV(folder):
# folder: W2_6_000000/
files = sorted(glob.glob(folder + '/data/*'))
data_before = None
out_list = []
first_data = np.load(files[0])
body1_x, body1_y = get_coordinate(first_data, 'pose', 1)
body8_x, body8_y = get_coordinate(first_data, 'pose', 8)
rate = 300 / ((body1_x - body8_x)**2 + (body1_y - body8_y)**2)**(1/2)
for i in files:
# i: 000000.npz
data = np.load(i)
body1_x, body1_y = get_coordinate(data, 'pose', 1)
body8_x, body8_y = get_coordinate(data, 'pose', 8)
if data_before is not None:
FV_1_dx, FV_1_dy = get_opticflow(data_before, data, 'pose', 4)
FV_2_dx, FV_2_dy = get_opticflow(data_before, data, 'pose', 7)
FV_3_dx, FV_3_dy = get_opticflow(data_before, data, 'pose', 3)
FV_4_dx, FV_4_dy = get_opticflow(data_before, data, 'pose', 6)
FV_5_dx, FV_5_dy = get_coordinate(data, 'pose', 4)
FV_6_dx, FV_6_dy = get_coordinate(data, 'pose', 7)
FV_7_dx, FV_7_dy = get_coordinate(data, 'pose', 3)
FV_8_dx, FV_8_dy = get_coordinate(data, 'pose', 6)
FV_5_dx, FV_5_dy = FV_5_dx - body1_x, FV_5_dy - body1_y
FV_6_dx, FV_6_dy = FV_6_dx - body1_x, FV_6_dy - body1_y
FV_7_dx, FV_7_dy = FV_7_dx - body1_x, FV_7_dy - body1_y
FV_8_dx, FV_8_dy = FV_8_dx - body1_x, FV_8_dy - body1_y
FV_9_dx, FV_9_dy = get_opticflow(data_before, data, 'pose', 1)
FV_10_dx, FV_10_dy = get_opticflow(data_before, data, 'pose', 0)
else:
FV_1_dx = FV_1_dy = FV_2_dx = FV_2_dy = FV_3_dx = FV_3_dy = FV_4_dx = FV_4_dy = FV_9_dx = FV_9_dy = FV_10_dx = FV_10_dy = 0
FV_5_dx, FV_5_dy = get_coordinate(data, 'pose', 4)
FV_6_dx, FV_6_dy = get_coordinate(data, 'pose', 7)
FV_7_dx, FV_7_dy = get_coordinate(data, 'pose', 3)
FV_8_dx, FV_8_dy = get_coordinate(data, 'pose', 6)
FV_5_dx, FV_5_dy = FV_5_dx - body1_x, FV_5_dy - body1_y
FV_6_dx, FV_6_dy = FV_6_dx - body1_x, FV_6_dy - body1_y
FV_7_dx, FV_7_dy = FV_7_dx - body1_x, FV_7_dy - body1_y
FV_8_dx, FV_8_dy = FV_8_dx - body1_x, FV_8_dy - body1_y
out = np.array([FV_1_dx, FV_1_dy, FV_2_dx, FV_2_dy, FV_3_dx, FV_3_dy, FV_4_dx, FV_4_dy, FV_5_dx, FV_5_dy, FV_6_dx, FV_6_dy, FV_7_dx, FV_7_dy, FV_8_dx, FV_8_dy, FV_9_dx, FV_9_dy, FV_10_dx, FV_10_dy]) * rate
data_before = data
out_list.append(out)
out_list = np.array(out_list)
return out_list
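# Feature-vector layout produced by load_FV (20 values per frame), assuming
# OpenPose BODY_25 keypoint indexing (0=nose, 1=neck, 3/6=elbows, 4/7=wrists,
# 8=mid-hip):
#   FV_1..FV_4 : optic flow (dx, dy) of both wrists and both elbows
#   FV_5..FV_8 : wrist and elbow positions relative to the neck
#   FV_9, FV_10: optic flow of the neck and the nose
# Every value is multiplied by `rate`, which normalizes the first frame's
# neck-to-hip distance to 300 pixels.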
def load_dataset(video_path):
folders = sorted(glob.glob(video_path + '/*'))
labels = []
fv_list = []
name_list = []
print('loading dataset ...')
for folder in tqdm(folders):
# folder: W2_6_000000/
folder_name = folder.split('/')[-1]
with open(folder + '/label.txt', 'r') as f:
label = f.read()
if label == '手を回転させる':  # "rotate the hand"
labels.append(0)
fv = load_FV(folder)
fv_list.append(fv)
name_list.append(folder_name)
elif label == '黒板で手をスライドさせる':  # "slide the hand along the blackboard"
labels.append(1)
fv = load_FV(folder)
fv_list.append(fv)
name_list.append(folder_name)
elif label == '黒板に指す':  # "point at the blackboard"
labels.append(2)
fv = load_FV(folder)
fv_list.append(fv)
name_list.append(folder_name)
else:
pass
y = np.array(labels)
x = np.array(fv_list)
name = np.array(name_list)
return x, y, name
if __name__ == '__main__':
x, y, name = load_dataset('/Users/songminglun/Documents/ILCS/data/video')
np.save('/Users/songminglun/Documents/ILCS/data/label.npy', y)
np.save('/Users/songminglun/Documents/ILCS/data/path_name.npy', name)
| np.save('/Users/songminglun/Documents/ILCS/data/feature_value.npy', x) | numpy.save |
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
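# Example usage (a sketch): groups can be looked up by number or by
# Hermann-Mauguin symbol, and a reflection expanded over the symmetry
# operations:
#
#   sg = space_groups['P 1 2 1']            # same object as space_groups[3]
#   hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
#   # hkls has one row per transformation; hkls[0] is (1, 2, 3) itself.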
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
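# No. 15 ('C 1 2/c 1') closes the monoclinic groups (Nos. 3-15); the
# orthorhombic groups (Nos. 16-74) follow, beginning with point group 222
# (Nos. 16-24).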
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
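# Note that centred settings (C, I, and later A and F) repeat each primitive
# operation once per centring vector, and the composed translations are kept
# as generated rather than reduced mod 1: e.g. (1/2, 1/2, 1) in
# 'I 21 21 21' above is equivalent to (1/2, 1/2, 0). The polar mm2 groups
# (Nos. 25-46) begin below.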
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
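# Hedged usage sketch (not part of the generated table): the helper below
# enumerates the symmetry-equivalent fractional positions of a point under
# one of the groups defined in this module. It assumes that SpaceGroup
# exposes the (rot, trans_num, trans_den) tuples passed to its constructor
# as a `transformations` attribute; if the class stores them under another
# name, adjust accordingly. The helper name itself is illustrative only.
def _equivalent_positions(group, point):
    """Return the orbit of a fractional coordinate under `group`.

    `group` is a SpaceGroup instance from `space_groups`; `point` is a
    length-3 sequence of fractional coordinates. Each image is reduced
    into the unit cell by taking coordinates mod 1.
    """
    point = N.array(point) * 1.0   # force float arithmetic
    positions = []
    for rot, trans_num, trans_den in group.transformations:
        # x' = rot.x + t, with t given elementwise as trans_num/trans_den.
        image = N.dot(rot, point) + (trans_num * 1.0) / trans_den
        positions.append(image % 1.0)
    return positions
# For example, _equivalent_positions(space_groups['P m m m'], (0.1, 0.2, 0.3))
# should yield the eight mmm-related images of that point.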
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
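# The ':2' suffix on the Hermann-Mauguin symbol denotes origin choice 2 of
# the International Tables, which puts the origin on an inversion centre;
# accordingly, the pure inversion -x,-y,-z above carries zero translation.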
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
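# Space group 57: P b c m (orthorhombic)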
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
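# Space group 58: P n n m (orthorhombic)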
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
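# Space group 59: P m m n :2 (orthorhombic, origin choice 2)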
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
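# Space group 60: P b c n (orthorhombic)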
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
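# Space group 61: P b c a (orthorhombic)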
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
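# Space group 62: P n m a (orthorhombic)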
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
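# Space group 63: C m c m (orthorhombic, C-centered)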
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
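# Space group 64: C m c a (orthorhombic, C-centered)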
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
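# Space group 65: C m m m (orthorhombic, C-centered)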
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
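# Space group 66: C c c m (orthorhombic, C-centered)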
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
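# Space group 67: C m m a (orthorhombic, C-centered)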
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
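# Space group 68: C c c a :2 (orthorhombic, C-centered, origin choice 2)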
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
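# Space group 69: F m m m (orthorhombic, face-centered)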
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
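# Space group 70: F d d d :2 (orthorhombic, face-centered, origin choice 2)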
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
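# Space group 71: I m m m (orthorhombic, body-centered)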
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
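# Space group 72: I b a m (orthorhombic, body-centered)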
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
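# Space group 73: I b c a (orthorhombic, body-centered)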
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
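# Space group 74: I m m a (orthorhombic, body-centered)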
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
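# Space group 75: P 4 (tetragonal)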
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
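# Space group 76: P 41 (tetragonal)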
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
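# Space group 77: P 42 (tetragonal)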
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
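# Space group 78: P 43 (tetragonal)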
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
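# Space group 79: I 4 (tetragonal, body-centered)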
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
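# Space group 80: I 41 (tetragonal, body-centered)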
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
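# Space group 81: P -4 (tetragonal)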
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
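# Space group 82: I -4 (tetragonal, body-centered)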
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
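# Space group 83: P 4/m (tetragonal)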
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
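
# Minimal usage sketch (illustration only, not part of the generated data):
# every (rot, trans_num, trans_den) triple above maps a fractional
# coordinate x to N.dot(rot, x) + trans_num/trans_den.  The helper below is
# a hypothetical convenience written against the raw tuples, so it assumes
# nothing about the SpaceGroup class API beyond the module-level `N`; it is
# defined here but never called by this module.  Example:
#     _symmetry_images(transformations, N.array([0.1, 0.2, 0.3]))
def _symmetry_images(ops, point):
    # Apply each (rotation, translation) pair to `point` and wrap the
    # result back into the unit cell; subtracting floor() handles both
    # positive and negative components.
    images = []
    for rot, trans_num, trans_den in ops:
        image = N.dot(rot, point) + trans_num*1.0/trans_den
        images.append(image - N.floor(image))
    return images
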
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
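# Space group 116: P -4 c 2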
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
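# Space group 117: P -4 b 2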
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
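# Space group 118: P -4 n 2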
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
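# Space group 119: I -4 m 2 (body-centered: the second half of the list repeats
# each operation with an added (1/2,1/2,1/2) translation)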
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
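# Space group 120: I -4 c 2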
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
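# Space group 121: I -4 2 m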
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
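# Space group 122: I -4 2 d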
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
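# Space group 123: P 4/m m m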
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
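# Space group 124: P 4/m c c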
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
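# Space group 125: P 4/n b m :2 (the ":2" suffix denotes origin choice 2)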
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
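# Space group 126: P 4/n n c :2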
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
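# Space group 127: P 4/m b m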
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
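# Space group 128: P 4/m n c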
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
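# Space group 129: P 4/n m m :2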
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
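# Space group 130: P 4/n c c :2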
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
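# Space group 131: P 42/m m c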
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
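# Space group 132: P 42/m c m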
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
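# Space group 133: P 42/n b c :2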
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
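# Space group 134: P 42/n n m :2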
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
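# Space group 135: P 42/m b c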
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
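# Space group 136: P 42/m n m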
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
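# Space group 137: P 42/n m c :2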
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
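# Body-centred (I) groups such as I 4/m m m list every operation twice:
# once as generated and once with the (1/2, 1/2, 1/2) centring translation
# added.  The sums are stored exactly as generated, i.e. translation
# components are not reduced modulo the lattice, so components equal to or
# greater than 1 can occur (e.g. (1/2, 1/2, 1) in I 4/m c m below).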
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
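# A minimal sketch (not part of the generated table) of how one of the
# (rot, trans_num, trans_den) triples stored in this module is applied to
# a point.  Assumption: N is a NumPy-compatible module and pos is a
# length-3 array of fractional coordinates; the translation vector is the
# elementwise ratio trans_num/trans_den.
def _apply_symmetry_op(transformation, pos):
    rot, trans_num, trans_den = transformation
    # multiply by 1.0 to force float division of the integer arrays
    return N.dot(rot, pos) + trans_num*1.0/trans_den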
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
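# Note: the enantiomorphic screw groups P 31 (No. 144) and P 32 (No. 145)
# differ only in the order of the fractional translations along c: the
# threefold screw components (0, 0, 1/3) and (0, 0, 2/3) are swapped
# between the two groups.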
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
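# Rhombohedral groups carry the ':H' suffix because they are given here in
# the hexagonal-axes (obverse) setting: each rotation appears three times,
# combined with the centring translations (0, 0, 0), (1/3, 2/3, 2/3) and
# (2/3, 1/3, 1/3).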
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
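# Sketch of generating the full orbit of a point under one of these groups
# (an illustration, not part of the generated table; assumes N is
# NumPy-compatible).  Each image is wrapped back into the unit cell, since
# the stored translations are not reduced modulo the lattice.
def _equivalent_positions(transformations, pos):
    positions = []
    for rot, trans_num, trans_den in transformations:
        p = N.dot(rot, pos) + trans_num*1.0/trans_den
        positions.append(p - N.floor(p))  # wrap each image into [0, 1)
    return positions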
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(167, 'R -3 c :H', transformations)
space_groups[167] = sg
space_groups['R -3 c :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(168, 'P 6', transformations)
space_groups[168] = sg
space_groups['P 6'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(169, 'P 61', transformations)
space_groups[169] = sg
space_groups['P 61'] = sg
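# In the screw-axis groups P 61 through P 65 (numbers 169-173) the six
# rotations about c are paired with translations of k/6 along c, which is
# why the numerator/denominator pairs in these blocks encode values such as
# 1/6, 5/6, 1/3, 2/3 and 1/2 rather than zero.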
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(170, 'P 65', transformations)
space_groups[170] = sg
space_groups['P 65'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(171, 'P 62', transformations)
space_groups[171] = sg
space_groups['P 62'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(172, 'P 64', transformations)
space_groups[172] = sg
space_groups['P 64'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(173, 'P 63', transformations)
space_groups[173] = sg
space_groups['P 63'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(174, 'P -6', transformations)
space_groups[174] = sg
space_groups['P -6'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(175, 'P 6/m', transformations)
space_groups[175] = sg
space_groups['P 6/m'] = sg
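# Centrosymmetric groups such as P 6/m include the inversion
# [-1,0,0, 0,-1,0, 0,0,-1] among their rotation matrices, so they carry
# twelve operators instead of the six found in the plain P 6 family above.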
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(176, 'P 63/m', transformations)
space_groups[176] = sg
space_groups['P 63/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(177, 'P 6 2 2', transformations)
space_groups[177] = sg
space_groups['P 6 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(178, 'P 61 2 2', transformations)
space_groups[178] = sg
space_groups['P 61 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(179, 'P 65 2 2', transformations)
space_groups[179] = sg
space_groups['P 65 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(180, 'P 62 2 2', transformations)
space_groups[180] = sg
space_groups['P 62 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(181, 'P 64 2 2', transformations)
space_groups[181] = sg
space_groups['P 64 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(182, 'P 63 2 2', transformations)
space_groups[182] = sg
space_groups['P 63 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(183, 'P 6 m m', transformations)
space_groups[183] = sg
space_groups['P 6 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(184, 'P 6 c c', transformations)
space_groups[184] = sg
space_groups['P 6 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(185, 'P 63 c m', transformations)
space_groups[185] = sg
space_groups['P 63 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(186, 'P 63 m c', transformations)
space_groups[186] = sg
space_groups['P 63 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(187, 'P -6 m 2', transformations)
space_groups[187] = sg
space_groups['P -6 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(188, 'P -6 c 2', transformations)
space_groups[188] = sg
space_groups['P -6 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(189, 'P -6 2 m', transformations)
space_groups[189] = sg
space_groups['P -6 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(190, 'P -6 2 c', transformations)
space_groups[190] = sg
space_groups['P -6 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(191, 'P 6/m m m', transformations)
space_groups[191] = sg
space_groups['P 6/m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(192, 'P 6/m c c', transformations)
space_groups[192] = sg
space_groups['P 6/m c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
import numpy as np
from torchnet.meter import AverageValueMeter
from visdom import Visdom
class Trainer:
def __init__(self, args, model):
self.name = args.name
self.model = model
self.l1win = None
self.l2win = None
self.l1meter = AverageValueMeter()
self.l2meter = AverageValueMeter()
self.visdom = Visdom(port=args.vis_port) if args.vis_steps > 0 else None
@property
def mode(self):
return 'training' if self.model.training else 'testing'
@property
def losses(self):
        return self.l1meter.value()[0], self.l2meter.value()[0]
def reset(self):
self.l1meter.reset()
self.l2meter.reset()
def log_losses(self, epoch, step):
l1, l2 = self.losses
message = f'{self.name} is {self.mode} (epoch: {epoch}, step: {step}) '
message += f'l1 average: {l1}, l2 average: {l2}'
print(message)
def vis_losses(self, epoch):
l1, l2 = self.losses
        x, y1, y2 = np.array([epoch]), np.array([l1]), np.array([l2])
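        # Editor sketch (assumption): the original method body is truncated
        # here; a minimal continuation streams both running averages to
        # visdom line plots, reusing the windows created on the first call.
        if self.visdom is not None:
            if self.l1win is None:
                self.l1win = self.visdom.line(X=x, Y=y1, opts=dict(title=f'{self.name} l1 loss'))
                self.l2win = self.visdom.line(X=x, Y=y2, opts=dict(title=f'{self.name} l2 loss'))
            else:
                self.visdom.line(X=x, Y=y1, win=self.l1win, update='append')
                self.visdom.line(X=x, Y=y2, win=self.l2win, update='append')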
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def rotate_pc_along_y(pc, rot_angle):
'''
Input:
pc: numpy array (N,C), first 3 channels are XYZ
z is facing forward, x is left ward, y is downward
rot_angle: rad scalar
Output:
pc: updated pc with XYZ rotated
'''
cosval = np.cos(rot_angle)
sinval = np.sin(rot_angle)
rotmat = np.array([[cosval, -sinval], [sinval, cosval]])
pc = pc.copy()
    pc[:, [0, 2]] = np.dot(pc[:, [0, 2]], np.transpose(rotmat))
    return pc
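if __name__ == '__main__':
    # Editor sketch: under the convention above, a point on the +x axis
    # rotated by pi/2 about y lands on the +z axis (up to round-off).
    pts = np.array([[1.0, 0.0, 0.0]])
    print(rotate_pc_along_y(pts, np.pi / 2))  # -> approx [[0. 0. 1.]]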
"""
Copyright (c) 2017, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of anybody else.
"""
import os
import numpy as np
class POWER:
class Data:
def __init__(self, data):
self.x = data.astype(np.float32)
self.N = self.x.shape[0]
def __init__(self, root):
trn, val, tst = load_data_normalised(root)
self.trn = self.Data(trn)
self.val = self.Data(val)
self.tst = self.Data(tst)
self.n_dims = self.trn.x.shape[1]
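# Example (editor sketch; assumes `root` contains power/data.npy and that
# load_data_normalised is defined in the truncated remainder of this module):
#     data = POWER('/path/to/datasets')
#     print(data.n_dims, data.trn.N)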
def load_data(root):
return np.load(os.path.join(root, 'power/data.npy'))
def load_data_split_with_noise(root):
rng = np.random.RandomState(42)
data = load_data(root)
rng.shuffle(data)
N = data.shape[0]
    data = np.delete(data, 3, axis=1)
import numpy as np
from scipy.signal import find_peaks, peak_widths, peak_prominences
from scipy.signal import savgol_filter
import matplotlib.pyplot as plt
from scipy.signal import find_peaks, peak_widths, peak_prominences
import pandas as pd
def dist_sarco(img, meta, lines, directory, plot=False, save = False):
dist = np.empty((len(lines), len(img)))
for frame, im in enumerate(img):
for num, li in enumerate(lines):
peak, _ = find_peaks(im[li[1], li[0]], distance=7)
dist_p = np.diff(peak)*meta['PhysicalSizeX']
dist[num, frame] = np.mean(dist_p)
dist_smooth = []
for i in range(len(dist)):
dist_smooth.append(savgol_filter(dist[i], 31, 5))
if plot:
fig, ax = plt.subplots(nrows = 1+(len(dist)*2), figsize=(12,36))
ax[0].imshow(img[0])
cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']
for i, li in enumerate(lines):
ax[0].plot(li[0], li[1], c=cycle[i])
k=0
j=1
for i in range(len(dist)):
k+=2
ax[j].plot(meta['Timepoint']*100, dist[i], label="Raw Data")
ax[j].plot(meta['Timepoint']*100, dist_smooth[i], label="Smoothed Data", c = cycle[i-1])
ax[j].legend()
ax[j].set_xlabel('Time in ms')
ax[j].set_ylabel('Sarcomere length (um)')
ax[j].set_title('Example 1')
positive_raw = 100-((dist[i] / np.percentile(dist[i], 90)))*100
positive_smooth = 100-((dist_smooth[i] / np.percentile(dist_smooth[i], 90)))*100
ax[k].plot(meta['Timepoint']*100, positive_raw, label="Raw Data")
ax[k].plot(meta['Timepoint']*100, positive_smooth, label="Smoothed Data")
ax[k].legend()
ax[k].set_xlabel('Time in ms')
ax[k].set_ylabel('Sarcomere shortening (%)')
j+=2
plt.savefig("sarcomere_ex1_data.pdf", transparent=True)
if save:
try:
filename = meta['Name']+'.pdf'
plt.savefig(directory+'/'+filename, transparent=True)
except FileNotFoundError:
plt.savefig(filename, transparent=True)
return dist_smooth
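# Example (editor sketch; `img` is a sequence of 2D intensity frames, `meta`
# supplies 'PhysicalSizeX' (um per pixel), 'Timepoint' and 'Name', and `lines`
# holds (x, y) index arrays sampled along each scan line):
#     dist_smooth = dist_sarco(img, meta, lines, 'results', plot=True, save=True)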
def av_dist_sarco(img, meta, lines, directory, plot=True, save = False):
mean_time = np.empty(len(img))
for frame, im in enumerate(img):
peaks_lst = []
for li in lines:
peaks, _ = find_peaks(im[li[1], li[0]], distance=7)
peaks_lst.append(peaks)
dists = []
for peak in peaks_lst:
dists.append(np.diff(peak)*meta['PhysicalSizeX'])
        mean_time[frame] = np.mean(np.hstack(dists))
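    # Editor sketch (assumption): the plotting branch for plot=True is
    # truncated in this copy; returning the per-frame mean keeps the
    # function usable without it.
    return mean_time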
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import torch
from torch.utils.data import Dataset
import numpy as np
import time
import os
import cv2
import sys
import utils
from datasets.scannet_scene import ScanNetScene
class PlaneDatasetSingle(Dataset):
def __init__(self, options, config, split, random=True, loadNeighborImage=False, load_semantics=False, load_boundary=False):
self.options = options
self.config = config
self.split = split
self.random = random
self.dataFolder = options.dataFolder
self.scenes = []
self.sceneImageIndices = []
self.loadClassMap()
planenet_scene_ids_val = np.load('datasets/scene_ids_val.npy')
planenet_scene_ids_val = {scene_id.decode('utf-8'): True for scene_id in planenet_scene_ids_val}
with open(self.dataFolder + '/ScanNet/Tasks/Benchmark/scannetv1_' + split + '.txt') as f:
for line in f:
scene_id = line.strip()
if split == 'test':
## Remove scenes which are in PlaneNet's training set for fair comparison
if scene_id not in planenet_scene_ids_val:
continue
pass
scenePath = self.dataFolder + '/scans/' + scene_id
if not os.path.exists(scenePath + '/' + scene_id + '.txt') or not os.path.exists(scenePath + '/annotation/planes.npy'):
continue
scene = ScanNetScene(options, scenePath, scene_id, self.confident_labels, self.layout_labels, load_semantics=load_semantics, load_boundary=load_boundary)
self.scenes.append(scene)
self.sceneImageIndices += [[len(self.scenes) - 1, imageIndex] for imageIndex in range(len(scene.imagePaths))]
continue
pass
if random:
t = int(time.time() * 1000000)
np.random.seed(((t & 0xff000000) >> 24) +
((t & 0x00ff0000) >> 8) +
((t & 0x0000ff00) << 8) +
((t & 0x000000ff) << 24))
else:
np.random.seed(0)
pass
np.random.shuffle(self.sceneImageIndices)
self.invalid_indices = {}
with open(self.dataFolder + '/invalid_indices_' + split + '.txt', 'r') as f:
for line in f:
tokens = line.split(' ')
if len(tokens) == 3:
assert(int(tokens[2]) < 10000)
invalid_index = int(tokens[1]) * 10000 + int(tokens[2])
if invalid_index not in self.invalid_indices:
self.invalid_indices[invalid_index] = True
pass
pass
continue
pass
self.sceneImageIndices = [[sceneIndex, imageIndex] for sceneIndex, imageIndex in self.sceneImageIndices if (sceneIndex * 10000 + imageIndex) not in self.invalid_indices]
print('num images', len(self.sceneImageIndices))
self.anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
config.RPN_ANCHOR_RATIOS,
config.BACKBONE_SHAPES,
config.BACKBONE_STRIDES,
config.RPN_ANCHOR_STRIDE)
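        ## self.anchors now covers every FPN level: one set of scales/ratios
        ## per backbone feature map, placed every RPN_ANCHOR_STRIDE cells.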
self.loadNeighborImage = loadNeighborImage
return
def loadClassMap(self):
classLabelMap = {}
with open(self.dataFolder + '/scannetv2-labels.combined.tsv') as info_file:
line_index = 0
for line in info_file:
if line_index > 0:
line = line.split('\t')
key = line[1].strip()
if line[4].strip() != '':
label = int(line[4].strip())
else:
label = -1
pass
classLabelMap[key] = label
classLabelMap[key + 's'] = label
classLabelMap[key + 'es'] = label
pass
line_index += 1
continue
pass
confidentClasses = {'wall': True,
'floor': True,
'cabinet': True,
'bed': True,
'chair': False,
'sofa': False,
'table': True,
'door': True,
'window': True,
'bookshelf': False,
'picture': True,
'counter': True,
'blinds': False,
'desk': True,
'shelf': False,
'shelves': False,
'curtain': False,
'dresser': True,
'pillow': False,
'mirror': False,
'entrance': True,
'floor mat': True,
'clothes': False,
'ceiling': True,
'book': False,
'books': False,
'refridgerator': True,
'television': True,
'paper': False,
'towel': False,
'shower curtain': False,
'box': True,
'whiteboard': True,
'person': False,
'night stand': True,
'toilet': False,
'sink': False,
'lamp': False,
'bathtub': False,
'bag': False,
'otherprop': False,
'otherstructure': False,
'otherfurniture': False,
'unannotated': False,
'': False
}
self.confident_labels = {}
for name, confidence in confidentClasses.items():
if confidence and name in classLabelMap:
self.confident_labels[classLabelMap[name]] = True
pass
continue
self.layout_labels = {1: True, 2: True, 22: True, 9: True}
return
def __len__(self):
return len(self.sceneImageIndices)
def transformPlanes(self, transformation, planes):
planeOffsets = np.linalg.norm(planes, axis=-1, keepdims=True)
centers = planes
centers = np.concatenate([centers, np.ones((planes.shape[0], 1))], axis=-1)
newCenters = np.transpose(np.matmul(transformation, np.transpose(centers)))
newCenters = newCenters[:, :3] / newCenters[:, 3:4]
refPoints = planes - planes / np.maximum(planeOffsets, 1e-4)
refPoints = np.concatenate([refPoints, np.ones((planes.shape[0], 1))], axis=-1)
newRefPoints = np.transpose(np.matmul(transformation, np.transpose(refPoints)))
newRefPoints = newRefPoints[:, :3] / newRefPoints[:, 3:4]
planeNormals = newRefPoints - newCenters
planeNormals /= np.linalg.norm(planeNormals, axis=-1, keepdims=True)
planeOffsets = np.sum(newCenters * planeNormals, axis=-1, keepdims=True)
newPlanes = planeNormals * planeOffsets
return newPlanes
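    ## Editor sketch: a quick sanity check is that an identity transformation
    ## returns the plane parameters unchanged (up to round-off), e.g.
    ##     assert np.allclose(self.transformPlanes(np.eye(4), planes), planes)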
def __getitem__(self, index):
t = int(time.time() * 1000000)
np.random.seed(((t & 0xff000000) >> 24) +
((t & 0x00ff0000) >> 8) +
((t & 0x0000ff00) << 8) +
((t & 0x000000ff) << 24))
if self.config.ANCHOR_TYPE == 'layout':
return self.getItemLayout(index)
if self.config.ANCHOR_TYPE == 'structure':
return self.getItemStructure(index)
while True:
if self.random:
index = np.random.randint(len(self.sceneImageIndices))
else:
index = index % len(self.sceneImageIndices)
pass
sceneIndex, imageIndex = self.sceneImageIndices[index]
scene = self.scenes[sceneIndex]
try:
image, planes, plane_info, segmentation, depth, camera, extrinsics = scene[imageIndex]
if len(planes) == 0:
index += 1
continue
except:
index += 1
continue
pass
if segmentation.max() < 0:
index += 1
continue
break
instance_masks = []
class_ids = []
parameters = []
if len(planes) > 0:
if 'joint' in self.config.ANCHOR_TYPE:
distances = np.linalg.norm(np.expand_dims(planes, 1) - self.config.ANCHOR_PLANES, axis=-1)
plane_anchors = distances.argmin(-1)
elif self.config.ANCHOR_TYPE == 'Nd':
plane_offsets = np.linalg.norm(planes, axis=-1)
plane_normals = planes / np.expand_dims(plane_offsets, axis=-1)
distances_N = np.linalg.norm(np.expand_dims(plane_normals, 1) - self.config.ANCHOR_NORMALS, axis=-1)
normal_anchors = distances_N.argmin(-1)
distances_d = np.abs(np.expand_dims(plane_offsets, -1) - self.config.ANCHOR_OFFSETS)
offset_anchors = distances_d.argmin(-1)
elif self.config.ANCHOR_TYPE in ['normal', 'patch']:
plane_offsets = np.linalg.norm(planes, axis=-1)
plane_normals = planes / np.expand_dims(plane_offsets, axis=-1)
distances_N = np.linalg.norm(np.expand_dims(plane_normals, 1) - self.config.ANCHOR_NORMALS, axis=-1)
normal_anchors = distances_N.argmin(-1)
elif self.config.ANCHOR_TYPE == 'normal_none':
plane_offsets = np.linalg.norm(planes, axis=-1)
plane_normals = planes / np.expand_dims(plane_offsets, axis=-1)
pass
pass
for planeIndex, plane in enumerate(planes):
m = segmentation == planeIndex
if m.sum() < 1:
continue
instance_masks.append(m)
if self.config.ANCHOR_TYPE == 'none':
class_ids.append(1)
parameters.append(np.concatenate([plane, np.zeros(1)], axis=0))
elif 'joint' in self.config.ANCHOR_TYPE:
class_ids.append(plane_anchors[planeIndex] + 1)
residual = plane - self.config.ANCHOR_PLANES[plane_anchors[planeIndex]]
parameters.append(np.concatenate([residual, np.zeros(1)], axis=0))
elif self.config.ANCHOR_TYPE == 'Nd':
class_ids.append(normal_anchors[planeIndex] * len(self.config.ANCHOR_OFFSETS) + offset_anchors[planeIndex] + 1)
normal = plane_normals[planeIndex] - self.config.ANCHOR_NORMALS[normal_anchors[planeIndex]]
offset = plane_offsets[planeIndex] - self.config.ANCHOR_OFFSETS[offset_anchors[planeIndex]]
parameters.append(np.concatenate([normal, np.array([offset])], axis=0))
elif self.config.ANCHOR_TYPE == 'normal':
class_ids.append(normal_anchors[planeIndex] + 1)
normal = plane_normals[planeIndex] - self.config.ANCHOR_NORMALS[normal_anchors[planeIndex]]
parameters.append(np.concatenate([normal, np.zeros(1)], axis=0))
elif self.config.ANCHOR_TYPE == 'normal_none':
class_ids.append(1)
normal = plane_normals[planeIndex]
parameters.append(np.concatenate([normal, np.zeros(1)], axis=0))
else:
assert(False)
pass
continue
parameters = np.array(parameters, dtype=np.float32)
mask = np.stack(instance_masks, axis=2)
class_ids = np.array(class_ids, dtype=np.int32)
        image, image_metas, gt_class_ids, gt_boxes, gt_masks, gt_parameters = load_image_gt(self.config, index, image, depth, mask, class_ids, parameters, augment=self.split == 'train')
## RPN Targets
rpn_match, rpn_bbox = build_rpn_targets(image.shape, self.anchors,
gt_class_ids, gt_boxes, self.config)
## If more instances than fits in the array, sub-sample from them.
if gt_boxes.shape[0] > self.config.MAX_GT_INSTANCES:
ids = np.random.choice(
np.arange(gt_boxes.shape[0]), self.config.MAX_GT_INSTANCES, replace=False)
gt_class_ids = gt_class_ids[ids]
gt_boxes = gt_boxes[ids]
gt_masks = gt_masks[:, :, ids]
gt_parameters = gt_parameters[ids]
## Add to batch
rpn_match = rpn_match[:, np.newaxis]
image = utils.mold_image(image.astype(np.float32), self.config)
depth = np.concatenate([np.zeros((80, 640)), depth, np.zeros((80, 640))], axis=0)
segmentation = np.concatenate([np.full((80, 640), fill_value=-1, dtype=np.int32), segmentation, np.full((80, 640), fill_value=-1, dtype=np.int32)], axis=0)
info = [image.transpose((2, 0, 1)).astype(np.float32), image_metas, rpn_match, rpn_bbox.astype(np.float32), gt_class_ids, gt_boxes.astype(np.float32), gt_masks.transpose((2, 0, 1)).astype(np.float32), gt_parameters, depth.astype(np.float32), segmentation, camera.astype(np.float32)]
if self.loadNeighborImage:
if imageIndex + self.options.frameGap < len(scene.imagePaths):
imagePath = scene.imagePaths[imageIndex + self.options.frameGap]
else:
imagePath = scene.imagePaths[imageIndex - self.options.frameGap]
pass
image_2 = cv2.imread(imagePath)
image_2 = cv2.resize(image_2, (self.config.IMAGE_MAX_DIM, self.config.IMAGE_MAX_DIM))
info.append(image_2.transpose((2, 0, 1)).astype(np.float32))
extrinsics_2_inv = []
posePath = imagePath.replace('color', 'pose').replace('.jpg', '.txt')
with open(posePath, 'r') as f:
for line in f:
extrinsics_2_inv += [float(value) for value in line.strip().split(' ') if value.strip() != '']
continue
f.close()
pass
extrinsics_2_inv = np.array(extrinsics_2_inv).reshape((4, 4))
extrinsics_2 = np.linalg.inv(extrinsics_2_inv)
temp = extrinsics_2[1].copy()
extrinsics_2[1] = extrinsics_2[2]
extrinsics_2[2] = -temp
transformation = np.matmul(extrinsics_2, np.linalg.inv(extrinsics))
if np.any(np.isnan(transformation)):
transformation = np.concatenate([np.diag(np.ones(3)), np.zeros((3, 1))], axis=-1)
pass
rotation = transformation[:3, :3]
translation = transformation[:3, 3]
axis, angle = utils.rotationMatrixToAxisAngle(rotation)
pose = np.concatenate([translation, axis * angle], axis=0).astype(np.float32)
info.append(pose)
info.append(scene.scenePath + ' ' + str(imageIndex))
pass
return info
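    ## Example (editor sketch; `options` and `config` come from the
    ## surrounding project and are assumptions here):
    #     from torch.utils.data import DataLoader
    #     dataset = PlaneDatasetSingle(options, config, split='train')
    #     loader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=4)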
def getAnchorPlanesNormalOffset(self, visualize=False):
for k in [7, ]:
print('k', k)
filename_N = self.dataFolder + '/anchor_planes_N_' + str(k) + '.npy'
filename_d = self.dataFolder + '/anchor_planes_d.npy'
if os.path.exists(filename_N) and os.path.exists(filename_d) and False:
return
if os.path.exists('test/anchor_planes/all_planes.npy'):
all_planes = np.load('test/anchor_planes/all_planes.npy')
else:
all_planes = []
for sceneIndex, imageIndex in self.sceneImageIndices[:10000]:
if len(all_planes) % 100 == 0:
print(len(all_planes))
pass
scene = self.scenes[sceneIndex]
image, planes, plane_info, segmentation, depth, camera, extrinsics = scene[imageIndex]
planes = planes[np.linalg.norm(planes, axis=-1) > 1e-4]
if len(planes) == 0:
continue
all_planes.append(planes)
continue
all_planes = np.concatenate(all_planes, axis=0)
np.save('test/anchor_planes/all_planes.npy', all_planes)
pass
from sklearn.cluster import KMeans
num_anchor_planes_N = k
num_anchor_planes_d = 3
offsets = np.linalg.norm(all_planes, axis=-1)
normals = all_planes / np.expand_dims(offsets, -1)
kmeans_N = KMeans(n_clusters=num_anchor_planes_N).fit(normals)
self.anchor_planes_N = kmeans_N.cluster_centers_
## Global offset anchors
kmeans_d = KMeans(n_clusters=num_anchor_planes_d).fit(np.expand_dims(offsets, -1))
self.anchor_planes_d = kmeans_d.cluster_centers_
if visualize:
color_map = utils.ColorPalette(max(num_anchor_planes_N, num_anchor_planes_d)).getColorMap()
normals_rotated = normals.copy()
normals_rotated[:, 1] = normals[:, 2]
normals_rotated[:, 2] = -normals[:, 1]
plane_cloud = np.concatenate([normals_rotated, color_map[kmeans_N.labels_]], axis=-1)
utils.writePointCloud('test/anchor_planes/anchor_planes_N.ply', plane_cloud)
plane_cloud = np.concatenate([all_planes, color_map[kmeans_d.labels_]], axis=-1)
utils.writePointCloud('test/anchor_planes/anchor_planes_d.ply', plane_cloud)
width = 500
height = 500
Us = np.round(np.arctan2(normals[:, 1], normals[:, 0]) / np.pi * width).astype(np.int32)
Vs = np.round((1 - (np.arcsin(normals[:, 2]) + np.pi / 2) / np.pi) * height).astype(np.int32)
indices = Vs * width + Us
validMask = np.logical_and(np.logical_and(Us >= 0, Us < width), np.logical_and(Vs >= 0, Vs < height))
indices = indices[validMask]
normalImage = np.zeros((height * width, 3))
normalImage[indices] = color_map[kmeans_N.labels_[validMask]]
normalImage = normalImage.reshape((height, width, 3))
cv2.imwrite('test/anchor_planes/normal_color_' + str(k) + '.png', normalImage)
exit(1)
pass
np.save(filename_N, self.anchor_planes_N)
np.save(filename_d, self.anchor_planes_d)
continue
return
def load_image_gt(config, image_id, image, depth, mask, class_ids, parameters, augment=False,
use_mini_mask=True):
"""Load and return ground truth data for an image (image, mask, bounding boxes).
augment: If true, apply random image augmentation. Currently, only
horizontal flipping is offered.
use_mini_mask: If False, returns full-size masks that are the same height
and width as the original image. These can be big, for example
1024x1024x100 (for 100 instances). Mini masks are smaller, typically,
224x224 and are generated by extracting the bounding box of the
object and resizing it to MINI_MASK_SHAPE.
Returns:
image: [height, width, 3]
shape: the original shape of the image before resizing and cropping.
class_ids: [instance_count] Integer class IDs
bbox: [instance_count, (y1, x1, y2, x2)]
mask: [height, width, instance_count]. The height and width are those
of the image unless use_mini_mask is True, in which case they are
defined in MINI_MASK_SHAPE.
"""
## Load image and mask
shape = image.shape
image, window, scale, padding = utils.resize_image(
image,
min_dim=config.IMAGE_MAX_DIM,
max_dim=config.IMAGE_MAX_DIM,
padding=config.IMAGE_PADDING)
mask = utils.resize_mask(mask, scale, padding)
## Random horizontal flips.
if augment and False:
if np.random.randint(0, 1):
image = np.fliplr(image)
mask = np.fliplr(mask)
depth = np.fliplr(depth)
pass
pass
## Bounding boxes. Note that some boxes might be all zeros
## if the corresponding mask got cropped out.
## bbox: [num_instances, (y1, x1, y2, x2)]
bbox = utils.extract_bboxes(mask)
## Resize masks to smaller size to reduce memory usage
if use_mini_mask:
mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)
pass
active_class_ids = np.ones(config.NUM_CLASSES, dtype=np.int32)
## Image meta data
image_meta = utils.compose_image_meta(image_id, shape, window, active_class_ids)
if config.NUM_PARAMETER_CHANNELS > 0:
if config.OCCLUSION:
depth = utils.resize_mask(depth, scale, padding)
mask_visible = utils.minimize_mask(bbox, depth, config.MINI_MASK_SHAPE)
mask = np.stack([mask, mask_visible], axis=-1)
else:
depth = np.expand_dims(depth, -1)
depth = utils.resize_mask(depth, scale, padding).squeeze(-1)
depth = utils.minimize_depth(bbox, depth, config.MINI_MASK_SHAPE)
mask = np.stack([mask, depth], axis=-1)
pass
pass
return image, image_meta, class_ids, bbox, mask, parameters
def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):
"""Given the anchors and GT boxes, compute overlaps and identify positive
anchors and deltas to refine them to match their corresponding GT boxes.
anchors: [num_anchors, (y1, x1, y2, x2)]
gt_class_ids: [num_gt_boxes] Integer class IDs.
gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]
Returns:
rpn_match: [N] (int32) matches between anchors and GT boxes.
1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
"""
## RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
## RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
    rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))
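    # Editor sketch (assumption): the remainder of this function is truncated
    # in this copy; per the docstring it would match anchors to GT boxes by
    # IoU before filling rpn_bbox, roughly:
    #     overlaps = utils.compute_overlaps(anchors, gt_boxes)
    #     anchor_iou_max = overlaps.max(axis=1)
    #     rpn_match[anchor_iou_max < 0.3] = -1
    #     rpn_match[anchor_iou_max >= 0.7] = 1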
import matplotlib.pyplot as plt
import numpy as np
from MLG import imagepath, paperpath, path
from imageio import imread
import matplotlib.cbook as cbook
from MLG.utils import color_own
from matplotlib import rc
__all__ = ['dark','Image_Window','Image_precision','Image_Illustration','Image_Illustration2','Image_Illustration_Multi','Image_compare_micro','Image_astroshift', 'create_all_Image']
def dark(onof = 0):
    if onof == 'on': plt.style.use('dark_background')
    elif onof == 'off': plt.style.use('default')
    elif onof == True: plt.style.use('dark_background')
    else: plt.style.use('default')
def Image_Window(string = 'resolve_Window', pres = False):
    '''------------------------------------------------------------
    Description: illustrates the Gaia readout windows: a 12 x 12 pixel
        grid of 0.059" x 0.177" pixels around a bright target star, two
        close neighbours inside the same window, and the shifted grid
        assigned to a further star.
    ---------------------------------------------------------------
    Input: string: stem of the output file name
        pres: if True, also save the intermediate figures (_1, _2, _3)
    ---------------------------------------------------------------
    Output: saves string + '.png' to imagepath (and paperpath, if set)
    ------------------------------------------------------------'''
dark= 'black' in plt.rcParams['savefig.facecolor']
if dark:
string = 'dark_'+string
black = color_own([0.7,0.7,0.7,1])
else: black = color_own([0.,0.,0.,1])
c1 = color_own([0,1,1,1])
c2 = color_own([1,0,0,1])
c3 = color_own([1,1,0.2,1])
c4 = color_own([0.4,0.4,0.4,1])
c_star = [c1,c2,c3,c4]
c_grid1 = color_own([0,int(dark),1,1])
c_grid2 = color_own([0.5,1,0,1])
star= np.array([[0, 0],[0.1,0.9],[-1,-1.1],[-0.5,0.1]])
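    # AL/AC positions (arcsec) of the bright target star and the three
    # fainter stars drawn below.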
fig = plt.figure(figsize = [12,12])
x_width = 0.059
y_width = 0.177
#------------------------------------------------------------
# axis
plt.xticks( fontsize = 25)
plt.yticks( fontsize = 25)
plt.grid(True)
plt.axis('equal')
plt.axis([-1.5,1.5,-1.4,1.6])
plt.ylabel('Across-scan direction (AC) [arcsec]', fontsize = 30)
plt.xlabel('Along-scan direction (AL) [arcsec]', fontsize = 30)
#------------------------------------------------------------
#------------------------------------------------------------
#Grid Major Star
for i in range(-6,7):
plt.plot([-6*x_width,6*x_width], [i*y_width,i*y_width], c = c_grid1,linewidth = 3)
plt.plot([i*x_width,i*x_width], [-6*y_width,6*y_width], c = c_grid1, linewidth = 3)
plt.text(0,1.4,"Along-scan direction\n $12\,\mathrm{pix} \\times 0.059 \mathrm{''/pix} = 0.708\mathrm{''}$",fontsize = 25, verticalalignment = 'center', horizontalalignment = 'center', rotation = 0)
plt.text(0.7,0,"Across-scan direction\n $12\,\mathrm{pix} \\times 0.177 \mathrm{''/pix} = 2.124\mathrm{''}$",fontsize = 25, verticalalignment = 'center', horizontalalignment = 'center', rotation = 90)
plt.arrow(0,6*y_width+2*x_width, -6*x_width+0.02,0,color= black, head_width=0.1,\
overhang = 0.5, length_includes_head=True ,zorder = 10, linewidth = 3)
plt.arrow(0,6*y_width+2*x_width, 6*x_width-0.02,0,color= black, head_width=0.1,\
overhang = 0.5, length_includes_head=True ,zorder = 10, linewidth = 3)
plt.arrow(8*x_width,0,0, -6*y_width+0.02,color= black, head_width=0.1,\
overhang = 0.5, length_includes_head=True ,zorder = 10, linewidth = 3)
plt.arrow(8*x_width,0,0, 6*y_width-0.02,color= black, head_width=0.1,\
overhang = 0.5, length_includes_head=True ,zorder = 10, linewidth = 3)
plt.scatter(star[:1,0], star[:1,1], marker=(5, 1),c = c_star[:1], s = [3000], zorder = 1000)
if pres: fig.savefig(imagepath + string + '_1.png', format = 'png')
#------------------------------------------------------------
#------------------------------------------------------------
#Grid Minor Star
plt.scatter(star[1:3,0], star[1:3,1], marker=(5, 1),c = c_star[1:3], s = [2000,2000], zorder = 1000)
if pres: fig.savefig(imagepath + string + '_2.png', format = 'png')
for i in range(-5,8):
plt.plot([-15*x_width,-6*x_width], [i*y_width,i*y_width], c = c_grid2,linewidth = 3, zorder = -1)
for i in range(-15,-5):
plt.plot([i*x_width,i*x_width], [-5*y_width,7*y_width], c = c_grid2, linewidth = 3, zorder = -1)
plt.scatter(star[3:,0], star[3:,1], marker=(5, 1),c = c_star[3:], s = [2000], zorder = 1000)
if pres: fig.savefig(imagepath + string + '_3.png', format = 'png')
#------------------------------------------------------------
fig.savefig(imagepath + string + '.png', format = 'png')
print('Create Image: '+ imagepath+ string + '.png')
if paperpath is not None: fig.savefig(paperpath + string + '.png', format = 'png')
plt.close(fig)
def Image_precision(string = 'Sig_vs_Gmag', Gaia_precision = path+'InputTable/resolution_Gaia.png', pres = False):
    '''------------------------------------------------------------
    Description: plots the along-scan astrometric precision as a function
        of G magnitude on top of the Gaia DR2 per-CCD precision figure,
        together with the predicted end-of-mission parallax error and the
        standard deviations adopted in this work (per CCD and per 9-CCD
        transit).
    ---------------------------------------------------------------
    Input: string: stem of the output file name
        Gaia_precision: path to the background image with the DR2 precision
        pres: if True, also save the intermediate figures (_1, _2, _3)
    ---------------------------------------------------------------
    Output: saves string + '.png' to imagepath (and paperpath, if set)
    ------------------------------------------------------------'''
dark= 'black' in plt.rcParams['savefig.facecolor']
if dark:
string = 'dark_'+string
black = color_own([0.7,0.7,0.7,1])
color1 = color_own([0.85,0,0,1])
color2 = color_own([0,0,1,1])
color3 = color_own([0,1,1,1])
color4 = color_own([0.5,1,0,1])
color5 = color_own([1,1,0,1])
else:
black = color_own([0.,0.,0.,1])
color1 = color_own([0.85,0,0,1])
color2 = color_own([0,0,1,1])
color3 = color_own([0,1,1,1])
color4 = color_own([0,1,0,1])
color5 = color_own([1,1,0,1])
fig = plt.figure(figsize = [12,10])
Gmag = np.arange(4,22,0.01)
datafile = cbook.get_sample_data(Gaia_precision)
img = imread(datafile)
z = 10 ** (0.4 * (np.maximum(Gmag, 14) - 15)) #(14-np.minimum(Gmag, 14))
z2 = 10 ** (0.4 * (np.maximum(Gmag, 12) - 15))
sig_pi = (-1.631 + 680.766 * z2 + 32.732 * z2**2)**0.5/1000
sig_fov2 =(-1.631 + 680.766 * z + 32.732 * z**2)**0.5/1000 *7.75 +0.1
sig_fov3 = sig_fov2 / np.sqrt(9)
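    # Editor note: at G = 15 (z = 1) the per-CCD formula gives
    # sqrt(-1.631 + 680.766 + 32.732)/1000 ~ 0.027 mas, so
    # sig_fov2 ~ 0.027 * 7.75 + 0.1 ~ 0.31 mas per CCD.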
plt.plot([0,1],[-5,-5], c = color1, linewidth = 3, label = 'formal precision from Gaia DR2 (per CCD)' )
plt.plot([0,1],[-5,-5], c = color2, linewidth = 3, label = 'actual precision from Gaia DR2 (per CCD)' )
plt.yticks([np.log10(i) for i in [20,10, 5,2,1, 0.5,0.2,0.1, 0.05,0.02, 0.01]],[20,10, 5,2,1, 0.5,0.2,0.1, 0.05,0.02,0.01], fontsize = 25)
plt.xticks( fontsize = 25)
plt.ylabel('Standard deviation of AL field angle [mas]', fontsize = 30)
plt.xlabel('G magnitude', fontsize = 30)
plt.imshow(img, zorder=0, extent=[5, 21.04, np.log10(0.0195),np.log10(10)])
plt.axis('auto')
plt.xlim([4,22])
plt.ylim([np.log10(0.005),np.log10(40)])
if pres:
plt.legend(loc = 'upper left',fontsize = 20)
fig.savefig(imagepath + string + '_1.png', format = 'png')
plt.plot(Gmag,np.log10(sig_pi), '--',c = color3, dashes =(5,5), linewidth = 3, label= 'predicted end-of-mission parallax error')
if pres:
plt.legend(loc = 'upper left',fontsize = 20)
fig.savefig(imagepath + string + '_2.png', format = 'png')
plt.plot(Gmag,np.log10(sig_fov2), ':' , c = color4, linewidth = 5, label= 'used Standard deviation (per CCD)' )
if pres:
plt.legend(loc = 'upper left',fontsize = 20)
fig.savefig(imagepath + string + '_3.png', format = 'png')
plt.plot(Gmag,np.log10(sig_fov3) ,c = color5,linewidth = 7, label= 'used Standard deviation for 9 CCD observations' )
plt.plot([5, 21.04, 21.04,5,5], [np.log10(0.0195),np.log10(0.0195),np.log10(10),np.log10(10),np.log10(0.0195)], linewidth = 2, color = [0.5,0.5,0.5,1], zorder = 0.1)
plt.axis('auto')
plt.xlim([4,22])
plt.ylim([np.log10(0.005),np.log10(40)])
plt.legend(loc = 'upper left',fontsize = 20)
fig.savefig(imagepath + string + '.png', format = 'png')
print('Create Image: '+ imagepath+ string + '.png')
if paperpath is not None: fig.savefig(paperpath + string + '.png', format = 'png')
plt.close(fig)
def Image_Illustration(string = 'Illustration'):
    '''------------------------------------------------------------
    Description: illustrates astrometric microlensing for a single
        background star: the motion of the lens, the unlensed and lensed
        source path, and the along-scan separations for six Gaia-like
        scan directions.
    ---------------------------------------------------------------
    Input: string: stem of the output file name
    ---------------------------------------------------------------
    Output: saves string + '.png' to imagepath (and paperpath, if set)
    ------------------------------------------------------------'''
dark= 'black' in plt.rcParams['savefig.facecolor']
if dark:
string = 'dark_'+string
black = color_own([0.7,0.7,0.7,1])
color1 = color_own([0,1,1,1])
color2 = color_own([1,0.5,0,1])
color3 = color_own([0.5,1,0,1])
color4 = color_own([1,0,1,1])
color5 = color_own([0,1,1,1])
color6 = color_own([0,1,1,1])
else:
black = color_own([0.,0.,0.,1])
color1 = color_own([0,0,1,1])
color2 = color_own([1,0.5,0,1])
color3 = color_own([0,1,0,1])
color4 = color_own([1,0,1,1])
color5 = color_own([0,1,1,1])
color6 = color_own([0,1,1,1])
t = np.array([12, 35, 41, 61, 73, 89])
scandir = np.array([0.1, 0.7, 0.4, 0.8 , 0.2, 0.1])*np.pi
x1 = np.linspace(1,13,100) + 0.3 * np.sin(np.linspace(np.pi/4,12*np.pi/4,100))
y1 = np.linspace(1,3,100) + 0.3* np.cos(np.linspace(np.pi/4,12*np.pi/4,100))
x2 = np.linspace(3,7,100)# + 0.03 * np.sin(np.linspace(np.pi/4,12*np.pi/4,100))
y2 = np.linspace(7,4.5,100)# + 0.03* np.cos(np.linspace(np.pi/4,12*np.pi/4,100))
d = np.sqrt((x1-x2)**2 + (y1-y2)**2)
TE = 1.5
X2 = x2 + (x2-x1) * TE/(d**2 +2*TE)
Y2 = y2 + (y2-y1) * TE/(d**2 +2*TE)
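    # With TE playing the role of theta_E**2, this is the standard astrometric
    # microlensing shift delta = theta_E * u / (u**2 + 2), u = d / theta_E,
    # applied along the lens-source separation vector.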
dX2 = x1-X2
dY2 = y1-Y2
dx2 = x1-x2
dy2 = y1-y2
fig = plt.figure(figsize= (12,8))
ax = plt.subplot(111)
ax.axis('equal')
ax.axis('off')
for i in range(len(t)):
xm1 =np.array([-1,1]) * np.cos(scandir[i]) + x1[t[i]]
ym1 =np.array([-1,1]) * np.sin(scandir[i]) + y1[t[i]]
xm2 =np.array([-1,1]) * np.cos(scandir[i]) + X2[t[i]]
ym2 =np.array([-1,1]) * np.sin(scandir[i]) + Y2[t[i]]
dsc = ((dx2[t[i]]).reshape(-1,1)*[np.sin(scandir[i]),np.cos(scandir[i])] \
+ (dy2[t[i]]).reshape(-1,1) *[-np.cos(scandir[i]),np.sin(scandir[i])])[0]
dSC = ((dX2[t[i]]).reshape(-1,1)*[np.sin(scandir[i]),np.cos(scandir[i])] \
+ (dY2[t[i]]).reshape(-1,1) *[-np.cos(scandir[i]),np.sin(scandir[i])])[0]
ttX2 = np.array([0,-dSC[1]/2,dSC[1]/2,0]) * np.cos(scandir[i]) + ([x1[t[i]],x1[t[i]],X2[t[i]],X2[t[i]]])
ttY2 = np.array([0,-dSC[1]/2,dSC[1]/2,0]) * np.sin(scandir[i]) +([y1[t[i]],y1[t[i]],Y2[t[i]],Y2[t[i]]])
ttx2 = np.array([0,-dsc[1]/2-0.2,dsc[1]/2-0.2,0]) * np.cos(scandir[i]) + ([x1[t[i]],x1[t[i]],x2[t[i]],x2[t[i]]])
tty2 = np.array([0,-dsc[1]/2-0.2,dsc[1]/2-0.2,0]) * np.sin(scandir[i]) +([y1[t[i]],y1[t[i]],y2[t[i]],y2[t[i]]])
if i % 2 == 0:
plt.arrow(ttx2[2],tty2[2], 0.0001*(ttx2[2]-ttx2[1]),0.0001*(tty2[2]-tty2[1]),color= color1, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttx2[1],tty2[1], 0.0001*(ttx2[1]-ttx2[2]),0.0001*(tty2[1]-tty2[2]),color= color1, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttX2[2],ttY2[2], 0.0001*(ttX2[2]-ttX2[1]),0.0001*(ttY2[2]-ttY2[1]),color= color2, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttX2[1],ttY2[1], 0.0001*(ttX2[1]-ttX2[2]),0.0001*(ttY2[1]-ttY2[2]),color= color2, head_width=0.2,\
overhang = 0.5, length_includes_head=True, zorder = 10)
plt.plot(ttx2[0:2],tty2[0:2],color = black, linestyle= ':')
plt.plot(ttX2[0:2],ttY2[0:2],color = black, linestyle= ':')
plt.plot(ttx2[1:3],tty2[1:3],color = color1,linewidth = 3 , linestyle= '--',dashes=(10, 10))
plt.plot(ttX2[1:3],ttY2[1:3],color = color2, linewidth = 3,linestyle= '-')
plt.plot(ttx2[2:],tty2[2:],color = black, linestyle= ':')
plt.plot(ttX2[2:],ttY2[2:],color = black, linestyle= ':')
if i% 2 == 0:
plt.plot(xm2,ym2, color = black, linewidth = 3,zorder = 1)
plt.plot(xm1,ym1, color = black, linewidth = 3,zorder = 1)
else:
plt.plot(xm2,ym2, color = 'grey', linewidth = 2, zorder = -1)
plt.plot(xm1,ym1, color = 'grey', linewidth = 2, zorder = -1)
#if i ==0 :
plt.plot(x1,y1, color = color3, linewidth = 3)
plt.plot(x2,y2, color = color1, linestyle= '--',dashes=(10, 5), linewidth = 3, zorder = -1)
plt.plot(X2,Y2, color = color2, linewidth = 3)
plt.xlim([-0.5,14])
xr = 12
yr = 7
plt.text(xr-0.8,0,'RA $\cdot$ cos(Dec)',verticalalignment = 'center',fontsize = 25)
plt.text(0,yr + 0.25,'Dec',fontsize = 25, horizontalalignment = 'center', rotation = 90)
plt.arrow(-0.025,0,xr-1,0,width = 0.05,overhang = 0.5,head_width = 0.5, head_length = 0.5,color= black, zorder = 100,length_includes_head=True)
plt.arrow(0,-0.025,0,yr-0.5,width = 0.05,overhang = 0.5,head_width = 0.5,head_length = 0.5,color= black, zorder = 100,length_includes_head=True)
plt.text(2,1.5,'Lens',color = color3, fontsize = 25, horizontalalignment = 'center', rotation = 0, weight = 'bold')
plt.text(4,7.5,'Star 1',color = color2,fontsize = 25, horizontalalignment = 'center', rotation = 0,weight = 'bold')
fig.savefig(imagepath + string + '.png', format = 'png')
print('Create Image: '+ imagepath+ string + '.png')
if paperpath is not None: fig.savefig(paperpath + string + '.png', format = 'png')
plt.close(fig)
def Image_Illustration2(string = 'Illustration2'):
    '''------------------------------------------------------------
    Description: step-by-step variant of Image_Illustration that saves
        numbered intermediate frames, adding the lens track, the unlensed
        and lensed source paths, the scan windows and the along-scan
        separations one after another.
    ---------------------------------------------------------------
    Input: string: stem of the output file names
    ---------------------------------------------------------------
    Output: saves string + '_1.png' ... '_5.png' and string + '.png'
        to imagepath (and paperpath, if set)
    ------------------------------------------------------------'''
dark= 'black' in plt.rcParams['savefig.facecolor']
if dark:
string = 'dark_'+string
black = color_own([0.,0.,0.,1])
grey = color_own([.5,.5,0.5,1])
cyan = color_own([0,1,1,1])
blue = color_own([0,0,1,1])
lime = color_own([0.6,1.2,0,1])
green = color_own([0,1,0,1])
red = color_own([1,0,0,1])
orange = color_own([1,1,0,1])
else:
black = color_own([0.,0.,0.,1])
grey = color_own([.5,.5,0.5,1])
cyan = color_own([0,1,1,1])
blue = color_own([0,0,1,1])
lime = color_own([0.6,1.2,0,1])
green = color_own([0,1,0,1])
red = color_own([1,0,0,1])
orange = color_own([1,1,0,1])
t = np.array([12, 35, 41, 61, 73, 89])
scandir = np.array([0.1, 0.7, 0.4, 0.8 , 0.2, 0.1])*np.pi
#Position_lens
x1 = np.linspace(1,13,100) + 0.3 * np.sin(np.linspace(np.pi/4,12*np.pi/4,100))
y1 = np.linspace(1,3,100) + 0.3* np.cos(np.linspace(np.pi/4,12*np.pi/4,100))
#unlensed Position_source
x2 = np.linspace(5,9,100)# + 0.03 * np.sin(np.linspace(np.pi/4,12*np.pi/4,100))
y2 = np.linspace(7,4.5,100)# + 0.03* np.cos(np.linspace(np.pi/4,12*np.pi/4,100))
d = np.sqrt((x1-x2)**2 + (y1-y2)**2)
TE = 2
X2 = x2 + (x2-x1) * TE/(d**2 +2*TE)
Y2 = y2 + (y2-y1) * TE/(d**2 +2*TE)
dX2 = x1-X2
dY2 = y1-Y2
dx2 = x1-x2
dy2 = y1-y2
fig = plt.figure(figsize= (12,8))
ax = plt.subplot(111)
ax.axis('equal')
ax.axis('off')
#---------------------------------------------------------------
#axis
plt.xlim([-0.5,14])
xr = 12
yr = 7
plt.text(xr-0.8,0,'RA $\cdot$ cos(Dec)',verticalalignment = 'center',fontsize = 25)
plt.text(0,yr + 0.25,'Dec',fontsize = 25, horizontalalignment = 'center', rotation = 90)
plt.arrow(-0.025,0,xr-1,0,width = 0.05,overhang = 0.5,head_width = 0.5, head_length = 0.5,color= black, zorder = 100,length_includes_head=True)
plt.arrow(0,-0.025,0,yr-0.5,width = 0.05,overhang = 0.5,head_width = 0.5,head_length = 0.5,color= black, zorder = 100,length_includes_head=True)
plt.text(2,1.5,'Lens',color = grey, fontsize = 25, horizontalalignment = 'center', rotation = 0, weight = 'bold')
#---------------------------------------------------------------
# Motion source
plt.plot(x1,y1, color = grey, linewidth = 7)
fig.savefig(imagepath + string + '_1.png', format = 'png')
plt.text(4,7.5,'Source',color = blue,fontsize = 25, horizontalalignment = 'center', rotation = 0,weight = 'bold')
plt.plot(x2,y2, color = cyan, linestyle= '--',dashes=(10, 5), linewidth = 3, zorder = -1)
fig.savefig(imagepath + string + '_2.png', format = 'png')
plt.plot(X2,Y2, color = blue, linewidth = 3)
for i in range(len(t)):
plt.plot([x2[t[i]],X2[t[i]]],[y2[t[i]],Y2[t[i]]],':',color = black)
fig.savefig(imagepath + string + '_3.png', format = 'png')
delta = 0.05
for i in range(len(t)):
xm1 =np.array([-1,1,1,-1,-1]) * np.cos(scandir[i]) + delta * np.array([-1,-1,1,1,-1]) * np.sin(scandir[i]) + x1[t[i]]
ym1 =np.array([-1,1,1,-1,-1]) * np.sin(scandir[i]) - delta * np.array([-1,-1,1,1,-1]) * np.cos(scandir[i]) + y1[t[i]]
xm2 =np.array([-1,1,1,-1,-1]) * np.cos(scandir[i]) + delta * np.array([-1,-1,1,1,-1]) * np.sin(scandir[i]) + X2[t[i]]
ym2 =np.array([-1,1,1,-1,-1]) * np.sin(scandir[i]) - delta * np.array([-1,-1,1,1,-1]) * np.cos(scandir[i]) + Y2[t[i]]
plt.plot(xm2,ym2, color = black, linewidth = 1,zorder = 1)
plt.plot(xm1,ym1, color = black, linewidth = 1,zorder = 1)
fig.savefig(imagepath + string + '_4.png', format = 'png')
for i in range(len(t)):
xm1 =np.array([-1,1,1,-1,-1]) * np.cos(scandir[i]) + delta * np.array([-1,-1,1,1,-1]) * np.sin(scandir[i]) + x1[t[i]]
ym1 =np.array([-1,1,1,-1,-1]) * np.sin(scandir[i]) - delta * np.array([-1,-1,1,1,-1]) * np.cos(scandir[i]) + y1[t[i]]
xm2 =np.array([-1,1,1,-1,-1]) * np.cos(scandir[i]) + delta * np.array([-1,-1,1,1,-1]) * np.sin(scandir[i]) + X2[t[i]]
ym2 =np.array([-1,1,1,-1,-1]) * np.sin(scandir[i]) - delta * np.array([-1,-1,1,1,-1]) * np.cos(scandir[i]) + Y2[t[i]]
dsc = ((dx2[t[i]]).reshape(-1,1)*[np.sin(scandir[i]),np.cos(scandir[i])] \
+ (dy2[t[i]]).reshape(-1,1) *[-np.cos(scandir[i]),np.sin(scandir[i])])[0]
dSC = ((dX2[t[i]]).reshape(-1,1)*[np.sin(scandir[i]),np.cos(scandir[i])] \
+ (dY2[t[i]]).reshape(-1,1) *[-np.cos(scandir[i]),np.sin(scandir[i])])[0]
ttX2 = np.array([0,-dSC[1]/2,dSC[1]/2,0]) * np.cos(scandir[i]) + ([x1[t[i]],x1[t[i]],X2[t[i]],X2[t[i]]])
ttY2 = np.array([0,-dSC[1]/2,dSC[1]/2,0]) * np.sin(scandir[i]) +([y1[t[i]],y1[t[i]],Y2[t[i]],Y2[t[i]]])
ttx2 = np.array([0,-dsc[1]/2-0.2,dsc[1]/2-0.2,0]) * np.cos(scandir[i]) + ([x1[t[i]],x1[t[i]],x2[t[i]],x2[t[i]]])
tty2 = np.array([0,-dsc[1]/2-0.2,dsc[1]/2-0.2,0]) * np.sin(scandir[i]) +([y1[t[i]],y1[t[i]],y2[t[i]],y2[t[i]]])
if i % 2 == 0:
plt.arrow(ttx2[2],tty2[2], 0.0001*(ttx2[2]-ttx2[1]),0.0001*(tty2[2]-tty2[1]),color= red, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttx2[1],tty2[1], 0.0001*(ttx2[1]-ttx2[2]),0.0001*(tty2[1]-tty2[2]),color= red, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttX2[2],ttY2[2], 0.0001*(ttX2[2]-ttX2[1]),0.0001*(ttY2[2]-ttY2[1]),color= orange, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttX2[1],ttY2[1], 0.0001*(ttX2[1]-ttX2[2]),0.0001*(ttY2[1]-ttY2[2]),color= orange, head_width=0.2,\
overhang = 0.5, length_includes_head=True, zorder = 10)
plt.plot(ttx2[0:2],tty2[0:2],color = black, linestyle= ':')
plt.plot(ttX2[0:2],ttY2[0:2],color = black, linestyle= ':')
plt.plot(ttx2[1:3],tty2[1:3],color = red,linewidth = 3 , linestyle= '--',dashes=(10, 10))
plt.plot(ttX2[1:3],ttY2[1:3],color = orange, linewidth = 3,linestyle= '-')
plt.plot(ttx2[2:],tty2[2:],color = black, linestyle= ':')
plt.plot(ttX2[2:],ttY2[2:],color = black, linestyle= ':')
#if i ==0 :
    fig.savefig(imagepath + string + '_5.png', format = 'png')
    fig.savefig(imagepath + string + '.png', format = 'png')
    print('Create Image: '+ imagepath+ string + '.png')
    if paperpath is not None: fig.savefig(paperpath + string + '.png', format = 'png')
plt.close(fig)
def Image_Illustration_Multi(string = 'Illustration_Multi'):
    '''------------------------------------------------------------
    Description: illustrates astrometric microlensing with two background
        stars: the motion of the lens, both lensed source paths, and the
        along-scan separations for six Gaia-like scan directions (the
        second source only contributes from the fourth epoch on).
    ---------------------------------------------------------------
    Input: string: stem of the output file name
    ---------------------------------------------------------------
    Output: saves the figure to imagepath (and paperpath, if set)
    ------------------------------------------------------------'''
dark= 'black' in plt.rcParams['savefig.facecolor']
if dark:
string = 'dark_'+string
black = color_own([0.7,0.7,0.7,1])
grey = color_own([.5,.5,0.5,1])
cyan = color_own([0,1,1,1])
blue = color_own([0,0,1,1])
lime = color_own([0.6,1.2,0,1])
green = color_own([0,1,0,1])
red = color_own([1,0,0,1])
orange = color_own([1,.5,0,1])
else:
black = color_own([0.,0.,0.,1])
grey = color_own([.5,.5,0.5,1])
cyan = color_own([0,1,1,1])
blue = color_own([0,0,1,1])
lime = color_own([0.6,1.2,0,1])
green = color_own([0,1,0,1])
red = color_own([1,0,0,1])
orange = color_own([1,1,0,1])
t = np.array([12, 35, 41, 61, 73, 89])
scandir = np.array([0.1, 0.7, 0.4, 0.8 , 0.2, 0.1])*np.pi
x1 = np.linspace(1,13,100) + 0.3 * np.sin(np.linspace(np.pi/4,12*np.pi/4,100))
y1 = np.linspace(1,3,100) + 0.3* np.cos(np.linspace(np.pi/4,12*np.pi/4,100))
x2 = np.linspace(3,7,100)# + 0.03 * np.sin(np.linspace(np.pi/4,12*np.pi/4,100))
y2 = np.linspace(7,4.5,100)# + 0.03* np.cos(np.linspace(np.pi/4,12*np.pi/4,100))
x3 = np.linspace(12,10,100)# + 0.03 * np.sin(np.linspace(np.pi/4,12*np.pi/4,100))
y3 = np.linspace(8,6,100)# + 0.03* np.cos(np.linspace(np.pi/4,12*np.pi/4,100))
d2 = np.sqrt((x1-x2)**2 + (y1-y2)**2)
d3 = np.sqrt((x1-x3)**2 + (y1-y3)**2)
TE = 1.5
X2 = x2 + (x2-x1) * TE/(d2**2 +2*TE)
Y2 = y2 + (y2-y1) * TE/(d2**2 +2*TE)
X3 = x3 + (x3-x1) * TE/(d3**2 +2*TE)
Y3 = y3 + (y3-y1) * TE/(d3**2 +2*TE)
dX2 = x1-X2
dY2 = y1-Y2
dx2 = x1-x2
dy2 = y1-y2
dX3 = x1-X3
dY3 = y1-Y3
dx3 = x1-x3
dy3 = y1-y3
fig = plt.figure(figsize= (12,8.8))
plt.subplots_adjust(
top=0.95,
bottom=0.05,
left=0.05,
right=0.95,
hspace=0.0,
wspace=0.2)
ax = plt.subplot(111)
ax.axis('equal')
ax.axis('off')
for i in range(len(t)):
delta = 0.05
xm1 =np.array([-1,1,1,-1,-1]) * np.cos(scandir[i]) + delta * np.array([-1,-1,1,1,-1]) * np.sin(scandir[i]) + x1[t[i]]
ym1 =np.array([-1,1,1,-1,-1]) * np.sin(scandir[i]) - delta * np.array([-1,-1,1,1,-1]) * np.cos(scandir[i]) + y1[t[i]]
xm2 =np.array([-1,1,1,-1,-1]) * np.cos(scandir[i]) + delta * np.array([-1,-1,1,1,-1]) * np.sin(scandir[i]) + X2[t[i]]
ym2 =np.array([-1,1,1,-1,-1]) * np.sin(scandir[i]) - delta * np.array([-1,-1,1,1,-1]) * np.cos(scandir[i]) + Y2[t[i]]
xm3 =np.array([-1,1,1,-1,-1]) * np.cos(scandir[i]) + delta * np.array([-1,-1,1,1,-1]) * np.sin(scandir[i]) + X3[t[i]]
ym3 =np.array([-1,1,1,-1,-1]) * np.sin(scandir[i]) - delta * np.array([-1,-1,1,1,-1]) * np.cos(scandir[i]) + Y3[t[i]]
dSC3 = ((dX3[t[i]]).reshape(-1,1)*[np.sin(scandir[i]),np.cos(scandir[i])] \
+ (dY3[t[i]]).reshape(-1,1) *[-np.cos(scandir[i]),np.sin(scandir[i])])[0]
dSC2 = ((dX2[t[i]]).reshape(-1,1)*[np.sin(scandir[i]),np.cos(scandir[i])] \
+ (dY2[t[i]]).reshape(-1,1) *[-np.cos(scandir[i]),np.sin(scandir[i])])[0]
dsc3 = ((dx3[t[i]]).reshape(-1,1)*[np.sin(scandir[i]),np.cos(scandir[i])] \
+ (dy3[t[i]]).reshape(-1,1) *[-np.cos(scandir[i]),np.sin(scandir[i])])[0]
dsc2 = ((dx2[t[i]]).reshape(-1,1)*[np.sin(scandir[i]),np.cos(scandir[i])] \
+ (dy2[t[i]]).reshape(-1,1) *[-np.cos(scandir[i]),np.sin(scandir[i])])[0]
ttX2 = np.array([0,-dSC2[1]/2,dSC2[1]/2,0]) * np.cos(scandir[i]) + ([x1[t[i]],x1[t[i]],X2[t[i]],X2[t[i]]])
ttY2 = np.array([0,-dSC2[1]/2,dSC2[1]/2,0]) * np.sin(scandir[i]) +([y1[t[i]],y1[t[i]],Y2[t[i]],Y2[t[i]]])
ttX3 = np.array([0,-dSC3[1]/2,dSC3[1]/2,0]) * np.cos(scandir[i]) + ([x1[t[i]],x1[t[i]],X3[t[i]],X3[t[i]]])
ttY3 = np.array([0,-dSC3[1]/2,dSC3[1]/2,0]) * np.sin(scandir[i]) +([y1[t[i]],y1[t[i]],Y3[t[i]],Y3[t[i]]])
ttx2 = np.array([0,-dsc2[1]/2-0.3,dsc2[1]/2-0.3,0]) * np.cos(scandir[i]) + ([x1[t[i]],x1[t[i]],x2[t[i]],x2[t[i]]])
tty2 = np.array([0,-dsc2[1]/2-0.3,dsc2[1]/2-0.3,0]) * np.sin(scandir[i]) +([y1[t[i]],y1[t[i]],y2[t[i]],y2[t[i]]])
if i == 3: off = [-0.4,-0.2]
else: off = [0,-0.2]
ttx3 = np.array([0,-dsc3[1]/2+off[1],dsc3[1]/2+off[1],0]) * np.cos(scandir[i]) + ([x1[t[i]],x1[t[i]],x3[t[i]],x3[t[i]]])
tty3 = np.array([0,-dsc3[1]/2+off[1],dsc3[1]/2+off[1],0]) * np.sin(scandir[i]) +([y1[t[i]],y1[t[i]],y3[t[i]],y3[t[i]]])
ttX3 = np.array([0,-dSC3[1]/2+off[0],dSC3[1]/2+off[0],0]) * np.cos(scandir[i]) + ([x1[t[i]],x1[t[i]],X3[t[i]],X3[t[i]]])
ttY3 = np.array([0,-dSC3[1]/2+off[0],dSC3[1]/2+off[0],0]) * np.sin(scandir[i]) +([y1[t[i]],y1[t[i]],Y3[t[i]],Y3[t[i]]])
'''
if i % 2 == 0:
plt.arrow(ttX2[2],ttY2[2], 0.0001*(ttX2[2]-ttX2[1]),0.0001*(ttY2[2]-ttY2[1]),color = color_own([0,0,1,1]),head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttX2[1],ttY2[1], 0.0001*(ttX2[1]-ttX2[2]),0.0001*(ttY2[1]-ttY2[2]),color = color_own([0,0,1,1]),head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.plot(ttX2[0:2],ttY2[0:2],color = black, linestyle= ':')
plt.plot(ttX2[1:3],ttY2[1:3],color = [158/200,1/200,66/200, 1], linewidth = 3,linestyle= '-')
plt.plot(ttX2[2:],ttY2[2:],color = black, linestyle= ':')
'''
if i% 2 == 0:
plt.plot(xm2,ym2, color = black, linewidth = 1,zorder = 1)
plt.plot(xm1,ym1, color = black, linewidth = 1,zorder = 1)
plt.arrow(ttX2[2],ttY2[2], 0.0001*(ttX2[2]-ttX2[1]),0.0001*(ttY2[2]-ttY2[1]),color=red ,head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttX2[1],ttY2[1], 0.0001*(ttX2[1]-ttX2[2]),0.0001*(ttY2[1]-ttY2[2]),color=red ,head_width=0.2,\
overhang = 0.5, length_includes_head=True, zorder = 10)
plt.plot(ttX2[0:2],ttY2[0:2],color = black, linestyle= ':')
plt.plot(ttX2[1:3],ttY2[1:3],color = red, linewidth = 3)
plt.plot(ttX2[2:],ttY2[2:],color = black, linestyle= ':')
plt.plot(ttx2[0:2],tty2[0:2],color = black, linestyle= ':')
plt.plot(ttx2[1:3],tty2[1:3],color = orange, linewidth = 3,linestyle= '-', dashes=(10, 2))
plt.plot(ttx2[2:],tty2[2:],color = black, linestyle= ':')
plt.arrow(ttx2[2],tty2[2], 0.0001*(ttx2[2]-ttx2[1]),0.0001*(tty2[2]-tty2[1]),color = orange, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttx2[1],tty2[1], 0.0001*(ttx2[1]-ttx2[2]),0.0001*(tty2[1]-tty2[2]),color = orange, head_width=0.2,\
overhang = 0.5, length_includes_head=True, zorder = 10)
if i >=3:
plt.plot(ttX3[0:2],ttY3[0:2],color = black, linestyle= ':')
plt.plot(ttX3[1:3],ttY3[1:3],color = red, linewidth = 3)
plt.plot(ttX3[2:],ttY3[2:],color = black, linestyle= ':')
plt.arrow(ttX3[2],ttY3[2], 0.0001*(ttX3[2]-ttX3[1]),0.0001*(ttY3[2]-ttY3[1]),color=red, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttX3[1],ttY3[1], 0.0001*(ttX3[1]-ttX3[2]),0.0001*(ttY3[1]-ttY3[2]),color= red, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.plot(xm3,ym3, color = black, linewidth = 1,zorder = 1)
plt.plot(ttx3[0:2],tty3[0:2],color = black, linestyle= ':')
plt.plot(ttx3[1:3],tty3[1:3],color = orange, linewidth = 3,linestyle= '-', dashes=(10, 2))
plt.plot(ttx3[2:],tty3[2:],color = black, linestyle= ':')
plt.arrow(ttx3[2],tty3[2], 0.0001*(ttx3[2]-ttx3[1]),0.0001*(tty3[2]-tty3[1]),color = orange, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttx3[1],tty3[1], 0.0001*(ttx3[1]-ttx3[2]),0.0001*(tty3[1]-tty3[2]),color = orange, head_width=0.2,\
overhang = 0.5, length_includes_head=True, zorder = 10)
'''
else:
plt.plot(xm3,ym3, color = 'grey', linewidth = 2, zorder = -1)
'''
elif i >=3:
plt.plot(xm3,ym3, color = black, linewidth = 1,zorder = 1)
plt.plot(xm1,ym1, color = black, linewidth = 1,zorder = 1)
#plt.plot(xm2,ym2, color = 'grey', linewidth = 2, zorder = -1)
plt.plot(ttX3[0:2],ttY3[0:2],color = black, linestyle= ':')
plt.plot(ttX3[1:3],ttY3[1:3],color = red, linewidth = 3 )
plt.plot(ttX3[2:],ttY3[2:],color = black, linestyle= ':')
plt.arrow(ttX3[2],ttY3[2], 0.0001*(ttX3[2]-ttX3[1]),0.0001*(ttY3[2]-ttY3[1]),color= red, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttX3[1],ttY3[1], 0.0001*(ttX3[1]-ttX3[2]),0.0001*(ttY3[1]-ttY3[2]),color= red, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.plot(ttx3[0:2],tty3[0:2],color = black, linestyle= ':')
plt.plot(ttx3[1:3],tty3[1:3],color = orange, linewidth = 3,linestyle= '-', dashes=(10, 2))
plt.plot(ttx3[2:],tty3[2:],color = black, linestyle= ':')
plt.arrow(ttx3[2],tty3[2], 0.0001*(ttx3[2]-ttx3[1]),0.0001*(tty3[2]-tty3[1]),color= orange, head_width=0.2,\
overhang = 0.5, length_includes_head=True ,zorder = 10)
plt.arrow(ttx3[1],tty3[1], 0.0001*(ttx3[1]-ttx3[2]),0.0001*(tty3[1]-tty3[2]),color= orange, head_width=0.2,\
overhang = 0.5, length_includes_head=True, zorder = 10)
'''
else:
plt.plot(xm3,ym3, color = 'grey', linewidth = 2, zorder = -1)
plt.plot(xm2,ym2, color = 'grey', linewidth = 2, zorder = -1)
plt.plot(xm1,ym1, color = 'grey', linewidth = 2, zorder = -1)
'''
#if i ==0 :
plt.plot(x1,y1, color = grey, linewidth = 7)
plt.plot(x2,y2, color = cyan, linestyle= '--',dashes=(6, 2), linewidth = 3, zorder = -1)
plt.plot(X2,Y2, color = blue, linewidth = 3)
plt.plot(x3,y3, color = lime, linestyle= '--',dashes=(6, 2), linewidth = 3, zorder = -1)
plt.plot(X3,Y3, color = green, linewidth = 3)
plt.xlim([-0.2,13.5])
xr = 12
yr = 7
    plt.text(xr-0.8,0,r'RA $\cdot$ cos(Dec)',verticalalignment = 'center',fontsize = 25)
plt.text(0,yr + 0.25,'Dec',fontsize = 25, horizontalalignment = 'center', rotation = 90)
plt.text(2,1.5,'Lens',color = grey,fontsize = 25, horizontalalignment = 'center', rotation = 0, weight = 'bold')
plt.text(4,7.5,'Star 1',color = blue, fontsize = 25, horizontalalignment = 'center', rotation = 0,weight = 'bold')
plt.text(11,8,'Star 2',color = green, fontsize = 25, horizontalalignment = 'center', rotation = 0,weight = 'bold')
plt.arrow(-0.025,0,xr-1,0,width = 0.05,overhang = 0.5,head_width = 0.5, head_length = 0.5,color= black, zorder = 100,length_includes_head=True)
plt.arrow(0,-0.025,0,yr-0.5,width = 0.05,overhang = 0.5,head_width = 0.5,head_length = 0.5,color= black, zorder = 100,length_includes_head=True)
fig.savefig(imagepath + string + '.png', format = 'png')
print('Create Image: '+ imagepath+ string + '.png')
if paperpath is not None: fig.savefig(paperpath + string + '.png', format = 'png')
plt.close(fig)
def Image_compare_micro(string = 'aml_vs_pml.png'):
    '''------------------------------------------------------------
    Description: compare the astrometric shift and the photometric
        magnification of three microlensing events as functions of time.
    ---------------------------------------------------------------
    Input: string: filename of the created image
    ---------------------------------------------------------------
    Output: png image saved to imagepath
    ------------------------------------------------------------'''
dark= 'black' in plt.rcParams['savefig.facecolor']
if dark:
string = 'dark_'+string
c1 =color_own([1,1,0,1])
c2 =color_own([1,0,1,1])
c3 =color_own([0.6,1.2,0,1])
c4 =color_own([0,1,1,1])
else:
c1 =color_own([0,0,1,1])
c2 =color_own([1,0,0,1])
c3 =color_own([0.5,1,0,1])
c4 =color_own([1,0,1,1])
rc('xtick', labelsize=24)
rc('ytick', labelsize=24)
Separation_E1 = 20
Separation_E2 = 2
Separation_E3 = 20
xx1 = np.array(range(-1000,1000,1))
xx2 = xx1
xx3 = xx1
yy1 = xx1*0+10
yy2 = xx1*0+1
yy3 = xx1*0+200
uu1 = np.sqrt(xx1*xx1+yy1*yy1)/Separation_E1
uu2 = np.sqrt(xx2*xx2+yy2*yy2)/Separation_E2
uu3 = np.sqrt(xx3*xx3+yy3*yy3)/Separation_E3
dSeparation1 = uu1/(uu1*uu1 + 2)*Separation_E1
dSeparation2 = uu2/(uu2*uu2 + 2)*Separation_E2
dSeparation3 = uu3/(uu3*uu3 + 2)*Separation_E3
A1 = (uu1*uu1+2)/(uu1*np.sqrt(uu1*uu1+4))
A2 = (uu2*uu2+2)/(uu2*np.sqrt(uu2*uu2+4))
A3 = (uu3*uu3+2)/(uu3*np.sqrt(uu3*uu3+4))
dm1 = 2.5*np.log10(A1)
dm2 = 2.5*np.log10(A2)
dm3 = 2.5*np.log10(A3)
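    # Standard point-lens microlensing relations: for normalized separation u,
    # the centroid shift of the combined image is dSeparation = u/(u^2+2)*theta_E
    # and the magnification is A = (u^2+2)/(u*sqrt(u^2+4)); dm converts A to
    # magnitudes.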
xx1 = xx1/250
xx2 = xx2/250
xx3 = xx3/250
figure = plt.figure(figsize = (12,12))
plt.subplots_adjust(hspace=0.1)
ax1 = plt.subplot2grid((2,1), (0, 0), rowspan=1)
plt.plot(xx1,dSeparation1, color = c1,linewidth = 4)
line1, = plt.plot(xx2,dSeparation2,color = c3 ,linewidth = 4)
plt.plot(xx3,dSeparation3,linestyle = '--',color = c4,linewidth = 4)
line1.set_dashes([10, 2, 10, 2])
plt.yticks([1,2,3,4,5,6,7,8],['1.0 ','2.0 ','3.0 ','4.0 ','5.0 ','6.0 ','7.0 ','8.0 '])
plt.ylim([0,8])
plt.ylabel('Shift [mas]',fontsize = 30)
plt.plot(xx1,xx1*0+0.5, color=c2,linewidth = 3)
ax1.tick_params(length=6, width=2)
ax1.tick_params(which='minor', length=4,width=2)
xticklabels1 = ax1.get_xticklabels()
plt.setp(xticklabels1, visible=False)
ax2 = plt.subplot2grid((2,1), (1, 0), rowspan=1,sharex=ax1)
plt.semilogy(xx1,dm1,color = c1,linewidth = 4)
line1, = plt.semilogy(xx2,dm2,color = c3,linewidth = 4)
plt.semilogy(xx3,dm3,linestyle = '--',color = c4,linewidth = 4)
line1.set_dashes([10, 2, 10, 2])
plt.semilogy(xx1,xx1*0+0.001, color= c2 ,linewidth = 3)
plt.ylim([0.0001,1])
plt.ylabel('Magnification [mag]',fontsize = 30)
ax2.tick_params(length=6, width=2)
ax2.tick_params(which='minor', length=4,width=2)
    plt.xlabel(r'$\Delta$ T [yr]',fontsize = 30)
figure.savefig(imagepath+ string,format = 'png')
    print('Create Image: ' + imagepath + string)
plt.close(figure)
def Image_astroshift(string = 'astroshift'):
    '''------------------------------------------------------------
    Description: plot the apparent trajectories of the two lensed
        images (theta_plus and theta_minus) of an astrometric
        microlensing event relative to the lens.
    ---------------------------------------------------------------
    Input: string: filename of the created image
    ---------------------------------------------------------------
    Output: png image saved to imagepath
    ------------------------------------------------------------'''
fig,ax = plt.subplots(figsize = [12,12] )
x = np.array([-100+0.1*t for t in range(2000)])
ThetaE = 12.75
umin = 0.75
fls = 10
siz = 10
y = np.array([ThetaE*umin for i in x])
ax.set_xlim(-30,30)
ax.set_ylim(-30,30)
ux =np.array([x[i]/ThetaE for i in range(len(x))])
uy =np.array([y[i]/ThetaE for i in range(len(x))])
u = np.array([np.sqrt(np.power(ux[i],2)+np.power(uy[i],2)) for i in range(len(x))])
theta_px = - (np.sqrt(u*u+4) - u)/(2 * u) * x
theta_py = - (np.sqrt(u*u+4) - u)/(2 * u) * y
theta_mx = - (-np.sqrt(u*u+4) - u)/(2 * u) * x
    theta_my = - (-np.sqrt(u*u+4) - u)/(2 * u) * y
import numpy as np
class hmm_discreet(object):
"""An exemple of discreet Hidden Markov Model. """
def __init__(self, M, X, V):
r"""Initialize a discreet Hidden Markov Model.
Arguments :
M -- the number of states
X -- the observation sequences
V -- the number of symbols
"""
self.T_MAX = 250
self.T_INC = 10
self.M = M
self.X = X
self.V = V
self.N = len(X)
self.T = len(X[0])
self.alpha = np.zeros((self.T, self.M))
self.beta = np.zeros((self.T, self.M))
self.A = np.ones((self.M, self.M))
self.B = np.ones((self.M, self.V))
self.pi = np.ones(self.M)
self.alphas = []
self.betas = []
self.P = np.zeros(self.N)
self.scales = []
self.logP = np.zeros(self.N)
self.gamma = np.ones((self.T, self.M))
self.xi = np.ones((self.T, self.M, self.M))
@staticmethod
def normalized_matrix(m):
r"""Normalize a matrix.
Arguments :
m -- the matrix to normalize
"""
        if len(m.shape) > 1:
            return m / m.sum(axis=1, keepdims=True)
        elif len(m.shape) == 1:
            return m / m.sum(axis=0, keepdims=True)
        else:
            raise ValueError("matrix must be 1- or 2-dimensional")
@staticmethod
def random_matrix_normalized(dim1, dim2=0):
r"""Normalize a vector or a matrix.
Arguments :
dim1 -- the first dimension of the matrix
Keywords Arguments :
dim2 -- the second dimension of the matrix
"""
if dim2 == 0:
v = np.random.random(dim1)
return v / v.sum(axis=0, keepdims=True)
else:
m = np.random.random((dim1, dim2))
return m / m.sum(axis=1, keepdims=True)
def compute_alpha(self, seq_idx):
r"""Compute alpha matrix of a given observation sequence.
Arguments:
seq_idx -- the index of the observed sequence
Latex:
Equation: $\alpha({t+1,}{j})=(b[j,O[t+1]].\sum_{i=1}^{N}(a_{i,j}.\alpha({t},{i})) , \forall t \in [1,T-1], \forall j \in [1,N]$
"""
x = self.X[seq_idx]
self.T = len(x)
for i in range(self.M):
self.alpha[0][i] = self.B[i][x[0]]*self.pi[i]
for t in range(1, self.T):
for j in range(self.M):
s = 0
for i in range(self.M):
s += self.A[i][j]*self.alpha[t-1][i]
self.alpha[t][j] = self.B[j][x[t]]*s
self.alphas.append(self.alpha.copy())
def compute_alpha_scaled(self, seq_idx):
r"""Compute alpha scaled matrix of a given observation sequence.
Arguments:
seq_idx -- the index of the observed sequence
Latex:
Equation: $\alpha({t+1,}{j})=(b[j,O[t+1]].\sum_{i=1}^{N}(a_{i,j}.\alpha({t},{i})) , \forall t \in [1,T-1], \forall j \in [1,N]$
"""
x = self.X[seq_idx]
self.T = len(x)
scale = np.zeros(self.T)
for i in range(self.M):
self.alpha[0][i] = self.B[i][x[0]]*self.pi[i]
        scale[0] = self.alpha[0].sum()
        self.alpha[0] /= scale[0]
for t in range(1, self.T):
for j in range(self.M):
s = 0
for i in range(self.M):
s += self.A[i][j]*self.alpha[t-1][i]
self.alpha[t][j] = self.B[j][x[t]]*s
scale[t] = self.alpha[t].sum()
self.alpha[t] = self.alpha[t] / scale[t]
self.alphas.append(self.alpha.copy())
self.scales.append(scale)
def compute_p_from_alpha(self, seq_idx):
r"""Compute probability of observing a sequence from alpha.
Arguments:
seq_idx -- the index of the observed sequence
Latex:
        Equation: $P(X(1,...,T) / \lambda)=\sum_{i=1}^{N}\alpha({T},{i})$
"""
p = self.alpha[-1].sum()
self.P[seq_idx] = p
return p
def compute_logp_from_alpha(self, seq_idx):
r"""Compute log probability of observing a sequence from alpha.
Arguments:
seq_idx -- the index of the observed sequence
Latex:
        Equation: $\log P(X(1,...,T) / \lambda)=\sum_{t=1}^{T}\log c_t$, with $c_t$ the scale factors
        """
        p = np.log(self.scales[seq_idx]).sum()
        self.logP[seq_idx] = p
return p
def compute_beta(self, seq_idx):
r"""Compute bêta matrix of a given observation sequence.
Arguments:
seq_idx -- the index of the observed sequence
Latex:
        Equation: $\beta({t-1},{i})=\sum_{j=1}^{N}(a(i,j)*b(j,o_{t})*\beta(t,j))$ $\forall t \in [T-1, T-2, ..., 1]$ and $\forall i \in [1,N]$
"""
x = self.X[seq_idx]
self.T = len(x)
for i in range(self.M):
self.beta[self.T-1][i] = 1
for t in reversed(range(1, self.T)):
for i in range(self.M):
s = 0
for j in range(self.M):
s += self.A[i][j]*self.B[j][x[t]]*self.beta[t][j]
self.beta[t-1][i] = s
self.betas.append(self.beta.copy())
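    def compute_beta_scaled(self, seq_idx):
        r"""Compute beta scaled matrix of a given observation sequence.
        NOTE: this method is called by Baum_Welch_Algorithm_scaled but was
        missing from the source; the body below is a minimal sketch assuming
        Rabiner's rescaling with the scale factors stored by
        compute_alpha_scaled.
        Arguments:
        seq_idx -- the index of the observed sequence
        """
        x = self.X[seq_idx]
        self.T = len(x)
        scale = self.scales[seq_idx]
        for i in range(self.M):
            self.beta[self.T-1][i] = 1
        for t in reversed(range(1, self.T)):
            for i in range(self.M):
                s = 0
                for j in range(self.M):
                    s += self.A[i][j]*self.B[j][x[t]]*self.beta[t][j]
                # divide by the alpha scale factor of step t so that
                # gamma = alpha_scaled * beta_scaled stays normalized
                self.beta[t-1][i] = s / scale[t]
        self.betas.append(self.beta.copy())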
def compute_p_from_beta(self, seq_idx):
r"""Compute probability of observing a sequence from bêta.
Arguments:
seq_idx -- the index of the observed sequence
Latex:
        Equation: $P(X(1,...,T) / \lambda)=\sum_{i}\Pi_{i}*b(i,o_{1})*\beta(1,i)$
"""
x = self.X[seq_idx]
p = np.sum(self.beta[0][:]*self.B[:, x[0]] * self.pi)
self.P[seq_idx] = p
return p
def compute_p_from_alpha_beta(self, t=None):
r"""Compute probability of observing a sequence from alpha and bêta.
Keywords Arguments:
t -- the time index, Default : None
Latex:
        Equation: $P(X(1,...,T) / \lambda)=\sum_{i}\alpha(t,i)*\beta(t,i)$
"""
if t != None:
t = t
else:
t = np.random.choice(self.T)
p = np.sum(self.alpha[t][:]*self.beta[t][:])
return p
def compute_gamma(self, seq_idx):
r"""Compute gamma matrix of a given observation sequence.
Arguments:
seq_idx -- the index of the observed sequence
Latex:
Equation: $\gamma(t,i)=\alpha(t,i)*\beta(t,i)/(\sum_{i=1}^{N}\alpha(t,i)*\beta(t,i))$
"""
p = self.P[seq_idx]
alpha = self.alphas[seq_idx]
beta = self.betas[seq_idx]
for t in range(self.T):
self.gamma[t][:] = (alpha[t][:]*beta[t][:])/p
def compute_gamma_scaled(self, seq_idx):
r"""Compute gamma scaled matrix of a given observation sequence.
Arguments:
seq_idx -- the index of the observed sequence
Latex:
Equation: $\gamma(t,i)=\alpha(t,i)*\beta(t,i)/(\sum_{i=1}^{N}\alpha(t,i)*\beta(t,i))$
"""
p = self.P[seq_idx]
alpha = self.alphas[seq_idx]
beta = self.betas[seq_idx]
for t in range(self.T):
self.gamma[t][:] = (alpha[t][:]*beta[t][:])
def compute_xi(self, seq_idx):
r"""Compute xi matrix of a given observation sequence.
Arguments:
seq_idx -- the index of the observed sequence
Latex:
Equation: $\xi(t,i,j)=\frac{\alpha(t,i)*a_{i,j}*b{j,X_{t+1}}*\beta(t+1,j)}{p(X/\lambda)}$
"""
p = self.P[seq_idx]
x = self.X[seq_idx]
alpha = self.alphas[seq_idx]
beta = self.betas[seq_idx]
for i in range(self.M):
for j in range(self.M):
for t in range(self.T-1):
self.xi[t, i, j] = (
alpha[t, i] * self.A[i, j] * self.B[j, x[t+1]] * beta[t+1, j])/p
def compute_xi_scaled(self, seq_idx):
r"""Compute xi matrix of a given observation sequence.
Arguments:
seq_idx -- the index of the observed sequence
Latex:
Equation: $\xi(t,i,j)=\frac{\alpha(t,i)*a_{i,j}*b{j,X_{t+1}}*\beta(t+1,j)}{p(X/\lambda)}$
"""
p = self.P[seq_idx]
x = self.X[seq_idx]
alpha = self.alphas[seq_idx]
beta = self.betas[seq_idx]
for i in range(self.M):
for j in range(self.M):
for t in range(self.T-1):
self.xi[t, i, j] = (
alpha[t, i] * self.A[i, j] * self.B[j, x[t+1]] * beta[t+1, j])/self.scales[seq_idx][t+1]
def reestimate_A(self, n):
r"""Reevaluate A matrix using Baum-Welch re-estimation formulas.
Arguments:
n -- the time index
Latex:
Equation: $\hat a_{i,j}=\frac{\sum_{t=1}^{T-1}\xi(t,i,j)}{\sum_{t=1}^{T-1}\gamma(t,i)}$
"""
num = 0
den = 0
den += (self.alphas[n][:-1] * self.betas[n][:-1]
).sum(axis=0, keepdims=True).T / self.P[n]
self.compute_xi(n)
for t in range(self.T-1):
num += self.xi[t, :, :]
return num, den
def reestimate_A_scaled(self, n):
r"""Reevaluate A matrix using Baum-Welch re-estimation formulas.
Arguments:
n -- the time index
Latex:
Equation: $\hat a_{i,j}=\frac{\sum_{t=1}^{T-1}\xi(t,i,j)}{\sum_{t=1}^{T-1}\gamma(t,i)}$
"""
num = 0
den = 0
den += (self.alphas[n][:-1] * self.betas[n]
[:-1]).sum(axis=0, keepdims=True).T
self.compute_xi_scaled(n)
for t in range(self.T-1):
num += self.xi[t, :, :]
return num, den
def reestimate_B(self, n):
r"""Reevaluate B matrix using Baum-Welch re-estimation formulas.
Arguments:
n -- the time index
Latex:
Equation: $\hat b_{j,k}=\frac{\sum_{t=1,o_t=k}^{T}\gamma(t,j)}{\sum_{t=1}^{T}\gamma(t,j)}$
"""
num = np.zeros((self.M, self.V))
den = 0
x = self.X[n]
den += (self.alphas[n] * self.betas[n]
).sum(axis=0, keepdims=True).T / self.P[n]
self.compute_gamma(n)
for t in range(self.T):
num[:, x[t]] += self.gamma[t][:]
return num, den
def reestimate_B_scaled(self, n):
r"""Reevaluate B matrix using Baum-Welch re-estimation formulas.
Arguments:
n -- the time index
Latex:
Equation: $\hat b_{j,k}=\frac{\sum_{t=1,o_t=k}^{T}\gamma(t,j)}{\sum_{t=1}^{T}\gamma(t,j)}$
"""
num = np.zeros((self.M, self.V))
den = 0
x = self.X[n]
den += (self.alphas[n] * self.betas[n]).sum(axis=0, keepdims=True).T
self.compute_gamma_scaled(n)
for t in range(self.T):
num[:, x[t]] += self.gamma[t][:]
return num, den
def Baum_Welch_Algorithm(self,A_init=None,B_init=None,pi_init=None,max_iter=1):
r"""Reevaluate A,B,Pi matrix using Baum-Welch re-estimation formulas.
Arguments:
A_init -- the initial A matrix
B_init -- the initial B matrix
pi_init -- the initial Pi matrix
        max_iter -- the maximum number of iterations
"""
        if A_init is not None and B_init is not None and pi_init is not None:
            self.A = self.normalized_matrix(A_init)
            self.B = self.normalized_matrix(B_init)
            self.pi = self.normalized_matrix(pi_init)
        else:
            self.A = self.random_matrix_normalized(self.M,self.M)
            self.B = self.random_matrix_normalized(self.M,self.V)
            self.pi = self.random_matrix_normalized(self.M)
self.costs = []
# iteration loop
for i in range(max_iter):
num_A = 0
den_A = 0
num_B = np.zeros((self.M, self.V))
den_B = 0
pi = np.zeros(self.M)
self.alphas = []
self.betas = []
self.P = np.zeros(self.N)
for n in range(self.N):
self.compute_alpha(n)
self.compute_p_from_alpha(n)
self.compute_beta(n)
pi += (self.alphas[n][0] * self.betas[n][0])/self.P[n]
numA, denA = self.reestimate_A(n)
num_A += numA
den_A += denA
numB, denB = self.reestimate_B(n)
num_B += numB
den_B += denB
self.costs.append(np.sum(self.P.copy()))
self.pi = pi / self.N
self.A = num_A / den_A
self.B = num_B / den_B
def Baum_Welch_Algorithm_scaled(self,A_init=None,B_init=None,pi_init=None,max_iter=1):
r""" eevaluate A,B,Pi matrix using Baum-Welch re-estimation formulas.
Arguments:
A_init -- the initial A matrix
B_init -- the initial B matrix
pi_init -- the initial Pi matrix
        max_iter -- the maximum number of iterations
"""
        if A_init is not None and B_init is not None and pi_init is not None:
            self.A = self.normalized_matrix(A_init)
            self.B = self.normalized_matrix(B_init)
            self.pi = self.normalized_matrix(pi_init)
        else:
            self.A = self.random_matrix_normalized(self.M,self.M)
            self.B = self.random_matrix_normalized(self.M,self.V)
            self.pi = self.random_matrix_normalized(self.M)
self.costs = []
# iteration loop
for i in range(max_iter):
num_A = 0
den_A = 0
num_B = np.zeros((self.M, self.V))
den_B = 0
pi = np.zeros(self.M)
self.alphas = []
self.betas = []
self.scales = []
self.logP = np.zeros(self.N)
for n in range(self.N):
self.compute_alpha_scaled(n)
self.compute_logp_from_alpha(n)
self.compute_beta_scaled(n)
pi += (self.alphas[n][0] * self.betas[n][0])
numA, denA = self.reestimate_A_scaled(n)
num_A += numA
den_A += denA
numB, denB = self.reestimate_B_scaled(n)
num_B += numB
den_B += denB
self.costs.append(np.sum(self.logP.copy()))
self.pi = pi / self.N
self.A = num_A / den_A
self.B = num_B / den_B
def likelihood(self, x):
r"""Compute likelihood P(x/lambda) given an observed sequence x.
Arguments :
x -- the observed sequence
"""
T = len(x)
alpha = np.zeros((T, self.M))
alpha[0] = self.pi*self.B[:, x[0]]
for t in range(1, T):
alpha[t] = alpha[t-1].dot(self.A) * self.B[:, x[t]]
return alpha[-1].sum()
def log_likelihood(self, x):
r"""Compute log likelihood log(P(x/lambda)) given an observed sequence x.
Arguments :
x -- the observed sequence
"""
        T = len(x)
        scale = np.zeros(T)
        alpha = np.zeros((T, self.M))
        alpha[0] = self.pi*self.B[:, x[0]]
        scale[0] = alpha[0].sum()
        alpha[0] /= scale[0]
        for t in range(1, T):
            alpha_t = alpha[t-1].dot(self.A) * self.B[:, x[t]]
            scale[t] = alpha_t.sum()
            alpha[t] = alpha_t / scale[t]
        return np.log(scale).sum()
def log_likelihood_multi(self, X):
r"""Compute log likelihoods log(P(x/lambda)) given observed sequence x
of vector X .
1
Arguments :
X -- the observed sequences vector
"""
return np.array([self.log_likelihood(x) for x in X])
def get_state_sequence(self, x):
r"""Compute Viterbi algorithm in order to get the optimal state sequence .
1
Arguments :
x -- the observed sequences
"""
T = len(x)
delta = np.zeros((T, self.M))
psi = np.zeros((T, self.M))
delta[0] = self.pi*self.B[:, x[0]]
for t in range(1, T):
for j in range(self.M):
delta[t, j] = np.max(delta[t-1]*self.A[:, j]) * self.B[j, x[t]]
psi[t, j] = np.argmax(delta[t-1]*self.A[:, j])
# finding the path
states = np.zeros(T, dtype=np.int32)
states[T-1] = np.argmax(delta[T-1])
for t in reversed(range(T-1)):
states[t] = psi[t+1, states[t+1]]
return states
def get_state_sequence_scaled(self, x):
r"""Compute Viterbi algorithm in order to get the optimal state sequence .
1
Arguments :
x -- the observed sequences
"""
T = len(x)
        delta = np.zeros((T, self.M))
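        # NOTE: the source file is truncated here; the remainder below is a
        # minimal sketch of the usual log-space Viterbi recursion (an assumed
        # reconstruction, not the original implementation).
        psi = np.zeros((T, self.M))
        delta[0] = np.log(self.pi) + np.log(self.B[:, x[0]])
        for t in range(1, T):
            for j in range(self.M):
                delta[t, j] = np.max(delta[t-1] + np.log(self.A[:, j])) + np.log(self.B[j, x[t]])
                psi[t, j] = np.argmax(delta[t-1] + np.log(self.A[:, j]))
        # backtrack the most probable state path
        states = np.zeros(T, dtype=np.int32)
        states[T-1] = np.argmax(delta[T-1])
        for t in reversed(range(T-1)):
            states[t] = psi[t+1, states[t+1]]
        return states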
# -*- coding: utf-8 -*-
"""
CESSIPy: Civil Engineer Stochastic System Identification for Python
Author: <NAME>
Support email: <EMAIL>
Site: https://github.com/MatheusCarini/CESSIPy
MIT License
Federal University of Rio Grande do Sul, Porto Alegre, Brazil
Version: 1.1
Date: 20211012
"""
#=============================================================================
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from MRPy import MRPy
from scipy import signal
from scipy.optimize import curve_fit
from matplotlib.gridspec import GridSpec
plt.rcParams["font.family"] = "Times New Roman"
mpl.rcParams['mathtext.fontset'] = 'cm'
#=============================================================================
# Naked Class
#=============================================================================
class auxclass(np.ndarray):
"""
Create a simple class to improve code readability
"""
def __new__(cls, np_array):
return np.asarray(np_array).view(cls)
#=============================================================================
# Time-Domain
#=============================================================================
def rearrange_data(self,ref):
"""
Rearrange the l outputs by positioning the r reference outputs in the first
rows.
Parameters
-------
self : MRPy_like
Time data MRPy object.
ref: tupple, list
List of reference sensors.
Returns
-------
yk : MRPy_like
MRPy object that contains the reference outputs in the first rows and
the attributes r and l.
.. l : MRPy attribute
Number of outputs.
.. r : MRPy attribute
Number of reference outputs.
"""
r = len(ref)
l = self.shape[0]
yk = MRPy(np.empty((l,self.N)),fs=self.fs)
yk.r = r
yk.l = l
yk[:r,:] = self[ref,:]
yk[r:,:] = np.delete(self, ref, 0)
return yk
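# Example: rearrange_data(y, ref=(0, 2)) moves output channels 0 and 2 to the
# first two rows of yk and keeps the remaining channels below them.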
#-----------------------------------------------------------------------------
def Toeplitz(self, i):
"""
Create the block Toeplitz matriz, which gathers the output covariances
estimates up to 2*i-1 time lags.
Parameters
-------
self : MRPy_like
MRPy object that contains the time data and the attributes r and l.
i : int
        Number of block rows used to form the output covariance estimates.
        Note that the covariances are estimated up to 2*i-1 time lags.
Returns
-------
T : auxclass_like
Auxclass object that contains the block Toeplitz matrix and the
attributes r, l and i.
"""
N = self.N - 2*i + 1
r = self.r
l = self.l
Ypref = np.zeros((r*i,N))
Yf = np.zeros((l*i,N))
for k in range(i):
Ypref[k*r:k*r+r,:] = self[:r,k:k+N]
Yf [k*l:k*l+l,:] = self[: ,k+i:k+i+N]
Ypref = Ypref/N**0.5
Yf = Yf /N**0.5
T = auxclass(Yf @ Ypref.T)
T.fs, T.r, T.l, T.i = self.fs, r, l, i
return T
#-----------------------------------------------------------------------------
def SSI_COV(T, no):
"""
Covariance-Driven Stochastic Subspace Identification Method
Estimate the eigenfrequencies, damping ratios and mode shapes of the block
Toeplitz matrix.
Parameters
-------
T : auxclass_like
Auxclass object that contains the block Toeplitz matrix and the
attributes SVD, r, l and i.
no : int
State space model order.
Returns
-------
fn : ndarray
Eigenfrequencies array.
zt : ndarray
Damping ratios array.
V : ndarray
Mode shapes array as columns.
See also
-------
Toeplitz, SSI_COV_iterator
"""
l = T.l
i = T.i
U, S, VT = T.SVD
U1 = U[:,:no]
S1 = np.eye(no)*S[:no]
Oi = U1 @ S1**0.5
C = Oi[:l,:]
A = np.linalg.pinv(Oi[:l*(i-1),:]) @ Oi[l:l*i+1,:]
Λd, Ψ = np.linalg.eig(A)
λ = np.log(Λd)*T.fs
fn = np.abs(λ)/(2*np.pi)
zt = -np.real(λ)/np.abs(λ)
V = C @ Ψ
return fn, zt, V
#-----------------------------------------------------------------------------
def SSI_COV_iterator(yk, i, nmin, nmax, incr=2, plot=False):
"""
Iterate the SSI_COV function for model orders from nmin to nmax and step
equal incr.
Estimate the eigenfrequencies, damping ratios and mode shapes using
SSI COV algorithm for increasing state space orders.
Parameters
-------
yk : MRPy_like
MRPy object returned by rearrange_data function.
i : int
        Number of block rows used to form the output covariance estimates.
        Note that the covariances are estimated up to 2*i-1 time lags.
nmin : int
The starting order number of the state space model.
nmax : int
The end order number of the state space model.
incr : int, optional
Step, spacing between model orders. The default step size is 2.
plot : bool, optional
If true, plots the singular values graph of the Toeplitz matrix.
Default is false.
Returns
-------
FN : ndarray
Eigenfrequencies 2D array. Each row originates from the same state
space model.
ZT : ndarray
Damping ratios 2D array. Each row originates from the same state
space model.
VV : ndarray
Mode shapes 3D array. The first index selects the state space order.
Notes
-------
The modal parameters of the first nmin state space model are FN[0,:],
ZT[0,:] and VV[0,:,:].
"""
T = Toeplitz(yk, i)
T.method = 'SSI COV'
if plot: plot_singular_values(T)
T.SVD = np.linalg.svd(T)
n = np.arange(nmin,nmax+incr,incr)
FN = np.zeros((n.shape[0],nmax))
ZT = np.zeros((n.shape[0],nmax))
VV = np.zeros((n.shape[0],T.l,nmax),dtype=np.complex_)
for ii, no in np.ndenumerate(n):
FN[ii,:no], ZT[ii,:no], VV[ii,:,:no] = SSI_COV(T,no)
return FN, ZT, VV
#-----------------------------------------------------------------------------
def projection(yk, i):
"""
Compute the QR factorization of the Hankel matrix and calculate the
matrices Piref, Pi1ref and Yii.
Parameters
-------
yk : MRPy_like
MRPy object returned by rearrange_data function.
i : int
        Number of block rows used to form the output covariance estimates.
        Note that the covariances are estimated up to 2*i-1 time lags.
Returns
-------
Pi : auxclass_like
Auxclass object that contains the projection of the row space of the
        future outputs onto the row space of the past reference outputs and
the attributes r, l and i.
Pi1 : array_like
Projection array changing the separation between past and future
outputs one row below.
Yii : array_like
Subset of the block Hankel matrix.
"""
N = yk.N - 2*i + 1
r = yk.r
l = yk.l
Ypref = np.zeros((r*i,N))
Yf = np.zeros((l*i,N))
for k in range(i):
Ypref[k*r:k*r+r,:] = yk[:r,k:k+N]
Yf [k*l:k*l+l,:] = yk[: ,k+i:k+i+N]
Ypref = Ypref/N**0.5
Yf = Yf /N**0.5
Href = np.vstack([Ypref,Yf])
R = np.linalg.qr(Href.T, mode='r').T
Pi = auxclass(R[r*i:,:r*i] @ np.eye(r*i,N))
Pi1 = R[r*i+l:,:r*i+r] @ np.eye(r*i+r,N)
Yii = R[r*i:r*i+l,:r*i+l] @ np.eye(r*i+l,N)
Pi.fs, Pi.r, Pi.l, Pi.i = yk.fs, r, l, i
return Pi, Pi1, Yii
#-----------------------------------------------------------------------------
def SSI_DATA(Pi, Pi1, Yii, no):
"""
Data-Driven Stochastic Subspace Identification Method
Estimate the eigenfrequencies, damping ratios and mode shapes of the
    Piref, Pi1ref and Yii matrices.
Parameters
-------
Pi, Pi1, Yii
See projection.
no : int
State space model order.
Returns
-------
fn : ndarray
Eigenfrequencies array.
zt : ndarray
Damping ratios array.
V : ndarray
Mode shapes array as columns.
"""
U, S, VT = Pi.SVD
U1 = U[:,:no]
S1 = np.eye(no)*S[:no]
Oi = U1 @ S1**0.5
Oi1 = Oi[:-Pi.l,:]
Xi = np.linalg.pinv(Oi) @ Pi
Xi1 = np.linalg.pinv(Oi1) @ Pi1
AC = np.vstack([Xi1,Yii]) @ np.linalg.pinv(Xi)
A = AC[:no,:]
C = AC[no:,:]
Λd, Ψ = np.linalg.eig(A)
λ = np.log(Λd)*Pi.fs
fn = np.abs(λ)/(2*np.pi)
zt = -np.real(λ)/np.abs(λ)
V = C @ Ψ
return fn, zt, V
#-----------------------------------------------------------------------------
def SSI_DATA_iterator(yk, i, nmin, nmax, incr=2, plot=False):
"""
Iterate the SSI_DATA function for model orders from nmin to nmax and step
equal incr.
Estimate the eigenfrequencies, damping ratios and mode shapes using
SSI DATA algorithm for increasing state space orders.
Parameters
-------
yk : MRPy_like
MRPy object returned by rearrange_data function.
i : int
Number of time lags used to calculate the covariances length.
Note that they are estimated up to 2*i-1 time lags.
nmin : int
The starting order number of the state space model.
nmax : int
The end order number of the state space model.
incr : int, optional
Step, spacing between model orders. The default step size is 2.
plot : bool, optional
If true, plots the singular values graph of the Pi matrix.
Default is false.
Returns
-------
FN : ndarray
Eigenfrequencies 2D array. Each row originates from the same state
space model.
ZT : ndarray
Damping ratios 2D array. Each row originates from the same state
space model.
VV : ndarray
Mode shapes 3D array. The first index selects the state space order.
Notes
-------
The modal parameters of the first nmin state space model are FN[0,:],
ZT[0,:] and VV[0,:,:].
"""
Pi, Pi1, Yii = projection(yk, i)
Pi.method = 'SSI DATA'
if plot: plot_singular_values(Pi)
Pi.SVD = np.linalg.svd(Pi)
n = np.arange(nmin,nmax+incr,incr)
FN = np.zeros((n.shape[0],nmax))
ZT = np.zeros((n.shape[0],nmax))
VV = np.zeros((n.shape[0],Pi.l,nmax),dtype=np.complex_)
for ii, no in np.ndenumerate(n):
FN[ii,:no],ZT[ii,:no],VV[ii,:,:no] = SSI_DATA(Pi,Pi1,Yii,no)
return FN, ZT, VV
#-----------------------------------------------------------------------------
def Fast_SSI(yk, i, nmin, nmax, incr=2, plot=False, based='COV'):
"""
Estimate the eigenfrequencies, damping ratios and mode shapes using Fast
Subspace-Based System Identification algorithm 2 from [1] for increasing
state space orders.
Parameters
-------
yk : MRPy_like
MRPy object returned by rearrange_data function.
i : int
        Number of block rows used to form the output covariance estimates.
        Note that the covariances are estimated up to 2*i-1 time lags.
nmin : int
The starting order number of the state space model.
nmax : int
The end order number of the state space model.
incr : int, optional
Step, spacing between model orders. The default step size is 2.
plot : bool, optional
If true, plots the singular values graph. Default is false.
    based : string, optional
SSI based method. If 'COV', it uses the covariance-driven SSI. If
'DATA', it uses the data-driven SSI. Default is 'COV'.
Returns
-------
FN : ndarray
Eigenfrequencies 2D array. Each row originates from the same state
space model.
ZT : ndarray
Damping ratios 2D array. Each row originates from the same state
space model.
VV : ndarray
Mode shapes 3D array. The first index selects the state space order.
Notes
-------
The modal parameters of the first nmin state space model are FN[0,:],
ZT[0,:] and VV[0,:,:].
Reference
----------
.. [1] <NAME>; <NAME>. Fast Multi-Order Computation of System
Matrices in Subspace-Based System Identification. Control
Engineering Practice, Elsevier, 2012, 20 (9), pp.882-894.
10.1016/j.conengprac.2012.05.005. hal-00724068
"""
if based.lower() == 'cov':
T = Toeplitz(yk, i)
T.method = 'SSI COV'
if plot: plot_singular_values(T)
U, S, VT = np.linalg.svd(T)
U1 = U[:,:nmax]
S1 = np.eye(nmax)*S[:nmax]
Oi = U1 @ S1**0.5
elif based.lower() == 'data':
Pi, Pi1, Yii = projection(yk, i)
Pi.method = 'SSI DATA'
if plot: plot_singular_values(Pi)
U, S, VT = np.linalg.svd(Pi)
U1 = U[:,:nmax]
S1 = np.eye(nmax)*S[:nmax]
Oi = U1 @ S1**0.5
else:
sys.exit('based method must be COV or DATA')
l = yk.l
Oiu = Oi[:l*(i-1),:]
Oid = Oi[l:l*i+1 ,:]
C = Oi[:l,:]
Q, R = np.linalg.qr(Oiu)
St = Q.T @ Oid
n = np.arange(nmin,nmax+incr,incr)
FN = np.zeros((n.shape[0],nmax))
ZT = np.zeros((n.shape[0],nmax))
VV = np.zeros((n.shape[0],l,nmax),dtype=np.complex_)
for ii, no in np.ndenumerate(n):
A = np.linalg.inv(R[:no,:no]) @ St[:no,:no]
Cj = C[:,:no]
Λd, Ψ = np.linalg.eig(A)
λ = np.log(Λd)*yk.fs
FN[ii,:no] = np.abs(λ)/(2*np.pi)
ZT[ii,:no] = -np.real(λ)/np.abs(λ)
VV[ii,:,:no] = Cj @ Ψ
return FN, ZT, VV
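# Minimal usage sketch (assumes `acc` is an (l x N) array of measured output
# signals sampled at fs Hz; the reference-sensor tuple is hypothetical):
#
#   y  = MRPy(acc, fs=fs)
#   yk = rearrange_data(y, ref=(0, 1))
#   FN, ZT, VV = Fast_SSI(yk, i=30, nmin=2, nmax=40, incr=2, based='COV')
#   stb = stabilization_diagram(FN, ZT, VV, 'Fast SSI', plot=True)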
#-----------------------------------------------------------------------------
def IV(T, no):
"""
Instrumental Variable Method
Estimate the eigenfrequencies, damping ratios and mode shapes of the block
Toeplitz matrix.
Parameters
-------
T : auxclass_like
Auxclass object that contains the block Toeplitz matrix and the
attributes SVD, r, l and i.
no : int
State space model order.
Returns
-------
fn : ndarray
Eigenfrequencies array.
zt : ndarray
Damping ratios array.
V : ndarray
Mode shapes array as columns.
See also
-------
Toeplitz
"""
r = T.r
l = T.l
αb = np.linalg.lstsq(T[:,-no*r:],
-T[:,-(no+1)*r:-no*r], rcond=None)[0]
Apcomp = np.zeros((no*r,no*r))
Apcomp[:-r,r:] += np.eye((no-1)*r)
for kk in range(no):
Apcomp[-r:,r*kk:r*(kk+1)] -= αb.T[:,r*(no-kk)-r:r*(no-kk)]
Λd, Ψ = np.linalg.eig(Apcomp)
λ = np.log(Λd)*T.fs
fn = np.abs(λ)/(2*np.pi)
zt = -np.real(λ)/np.abs(λ)
Gmref = (Ψ[:r,:]).T
Γmref = np.zeros((no*r,no*r),dtype=np.complex_)
for ii in range(no):
Γmref[:,ii*r:(ii+1)*r] = np.diag(Λd**(no-ii-1)) @ Gmref
V = T[:l,-no*r:] @ np.linalg.inv(Γmref)
return fn, zt, V
#-----------------------------------------------------------------------------
def IV_iterator(yk, i, nmin, nmax, incr=2, plot=False):
"""
Iterate the IV function for model orders from nmin to nmax and step equal
incr.
Estimate the eigenfrequencies, damping ratios and mode shapes using IV
algorithm for increasing state space orders.
Parameters
-------
yk : MRPy_like
MRPy object returned by rearrange_data function.
i : int
        Number of block rows used to form the output covariance estimates.
        Note that the covariances are estimated up to 2*i-1 time lags.
nmin : int
The starting order number of the state space model.
nmax : int
The end order number of the state space model.
incr : int, optional
Step, spacing between model orders. The default step size is 2.
plot : bool, optional
If true, plots the singular values graph of the Toeplitz matrix.
Default is false.
Returns
-------
FN : ndarray
Eigenfrequencies 2D array. Each row originates from the same state
space model.
ZT : ndarray
Damping ratios 2D array. Each row originates from the same state
space model.
VV : ndarray
Mode shapes 3D array. The first index selects the state space order.
Notes
-------
The relation between ARMA order p and state space order n is n = p * r.
The modal parameters of the first nmin state space model are FN[0,:],
ZT[0,:] and VV[0,:,:].
"""
T = Toeplitz(yk,i)
T.method = 'IV'
if plot: plot_singular_values(T)
n = np.arange(nmin,nmax+incr,incr)
FN = np.zeros((n.shape[0],nmax*T.r))
ZT = np.zeros((n.shape[0],nmax*T.r))
VV = np.zeros((n.shape[0],T.l,nmax*T.r),dtype=np.complex_)
for ii, no in np.ndenumerate(n):
FN[ii,:no*T.r], ZT[ii,:no*T.r], VV[ii,:,:no*T.r] = IV(T,no)
return FN, ZT, VV
#-----------------------------------------------------------------------------
def stabilization_diagram(FN, ZT, VV, title,
tol = np.array(([0.01,0, 100],
[0.05,0,0.05],
[0.10,0, 1])), plot=True):
"""
Compute the stable poles and plot the stabilization diagram
Parameters
-------
FN, ZT, VV
Modal parameters returned by SSI_COV_Iterator, SSI_DATA_Iterator and
IV_Iterator functions.
title : str
Graph title.
tol : ndarray, optional
Array of stabilization criteria.
Rows: frequencies, damping ratios and MAC values respectively.
Columns: percentage tolerance, minimum and maximum values respectively.
Default is:
[0.01,0,100 ] Δf = 1%; fmin = 0 Hz; fmax = 100 Hz
[0.05,0,0.05] Δζ = 5%; ζmin = 0%; ζmax = 5%
[0.10,0,1 ] MAC >= (1 - 0.10) = 0.90
plot : bool, optional
If true, plots the stabilization diagram. Default is false.
Returns
-------
stb : array_like
Boolean array that contains True for stable poles. Each row originates
from the same state space model.
Notes
-------
First stb index refers to model order. For example, the last stable poles
row stb[-1,:] originates from nmax model order.
"""
nmin = np.count_nonzero(FN, axis=1)[0]
nmax = np.count_nonzero(FN, axis=1)[-1]
incr = (nmax-nmin)//(FN.shape[0]-1)
    n    = np.arange(nmin,nmax+incr,incr)
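    # NOTE: the source file is truncated here; the block below is a minimal
    # sketch of the usual stability check (relative frequency and damping
    # deviations and the MAC between poles of consecutive model orders),
    # with plotting omitted; it is an assumed reconstruction, not the
    # original implementation.
    stb = np.full(FN.shape, False)
    for ii in range(1, n.shape[0]):
        for jj in range(n[ii]):
            f, z, v = FN[ii, jj], ZT[ii, jj], VV[ii, :, jj]
            if f <= 0:
                continue
            kk = np.argmin(np.abs(FN[ii-1, :n[ii-1]] - f))  # closest pole one order below
            dfn = np.abs(FN[ii-1, kk] - f)/f
            dzt = np.abs(ZT[ii-1, kk] - z)/np.abs(z) if z != 0 else np.inf
            vp  = VV[ii-1, :, kk]
            MAC = (np.abs(vp.conj() @ v)**2 /
                   ((vp.conj() @ vp).real*(v.conj() @ v).real))
            stb[ii, jj] = (dfn < tol[0,0] and tol[0,1] <= f <= tol[0,2] and
                           dzt < tol[1,0] and tol[1,1] <= z <= tol[1,2] and
                           1 - MAC < tol[2,0])
    return stb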
import os
import sys
import time
import json
import argparse
import librosa
import numpy as np
from tqdm import tqdm
from glob import glob
from typing import Any
from tf_lite.filter import Filter
from tf_lite.tf_lite import TFLiteModel
class Dataset_Filter:
def __init__(self,
dataset: str,
filter: TFLiteModel,
**kwargs: Any) -> None:
# dataset variables
self.dataset = dataset
self.audio_metadata = json.load(open(dataset, 'r'))
self.wake_word = kwargs['wake_word']
# filter class variables
self.filter = Filter(model_dir=args.models_dir)
self.num_filter_outputs = self.filter.num_outputs()
# audio parameters
self.sr = kwargs['sample_rate']
self.fw = kwargs['frame_width']
self.frame_len = self.sr // 1000 * self.fw
# data locations
self.out_dir = kwargs['out_dir']
self.data_dir = kwargs['data_dir']
# make directory structure for dataset
self.dataset_name = os.path.basename(dataset).replace('.json', '')
self.dataset_dir = os.path.join(self.out_dir, self.dataset_name)
os.makedirs(self.dataset_dir, exist_ok=True)
os.makedirs(os.path.join(self.dataset_dir, self.wake_word), exist_ok=True)
os.makedirs(os.path.join(self.dataset_dir, f'not-{self.wake_word}'), exist_ok=True)
def filter_audio_file(self, audio_file: str) -> None:
features = []
# load audio from file
samples, _ = librosa.load(os.path.join(self.data_dir, audio_file), sr=self.sr)
# frame audio and process it through filter
for start_idx in np.arange(0, len(samples), self.frame_len):
frame = samples[start_idx:start_idx+self.frame_len]
if len(frame) < self.frame_len:
pad_len = self.frame_len - len(frame)
                frame = np.pad(frame, (0,pad_len), mode='constant')
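            # NOTE: the source file is truncated here; a plausible continuation
            # collects the filter outputs for every frame, e.g. (hypothetical
            # method name, not confirmed by the original tf_lite Filter class):
            #     features.append(self.filter.filter_frame(frame))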
# coding:utf-8
# Test for upsample_2d
# Created : 7, 5, 2018
# Revised : 7, 5, 2018
# All rights reserved
#------------------------------------------------------------------------------------------------
__author__ = 'dawei.leng'
import os, sys
os.environ['THEANO_FLAGS'] = "floatX=float32, mode=FAST_RUN, warn_float64='raise'"
import theano
from theano import tensor
from dandelion.module import *
from dandelion.functional import upsample_2d, upsample_2d_bilinear
from lasagne.layers import InputLayer, get_output, Upscale2DLayer
import dandelion
dandelion_path = os.path.split(dandelion.__file__)[0]
print('dandelion path = %s\n' % dandelion_path)
class build_model_D(Module):
def __init__(self, ratio=[2, 3], mode='repeat'):
super().__init__()
self.ratio = ratio
self.mode = mode
self.predict = self.forward
def forward(self, x):
"""
:param x: (B, C, H, W)
:return:
"""
x = upsample_2d(x, ratio=self.ratio, mode=self.mode)
# x = relu(x)
return x
def build_model_L(ratio=[2,3], mode='repeat'):
input_var = tensor.ftensor4('x') # (B, C, H, W)
input0 = InputLayer(shape=(None, None, None, None), input_var=input_var, name='input0')
x = Upscale2DLayer(input0, scale_factor=ratio, mode=mode)
return x
def test_case_0():
import numpy as np
from lasagne_ext.utils import get_layer_by_name
ratio = [1, 2]
mode = 'dilate'
model_D = build_model_D(ratio=ratio, mode=mode)
model_L = build_model_L(ratio=ratio, mode=mode)
X = get_layer_by_name(model_L, 'input0').input_var
y_D = model_D.forward(X)
y_L = get_output(model_L)
fn_D = theano.function([X], y_D, no_default_updates=True, on_unused_input='ignore')
fn_L = theano.function([X], y_L, no_default_updates=True, on_unused_input='ignore')
for i in range(20):
B = np.random.randint(low=1, high=16)
C = np.random.randint(low=1, high=32)
H = np.random.randint(low=5, high=256)
W = np.random.randint(low=5, high=255)
x = np.random.rand(B, C, H, W).astype(np.float32) - 0.5
y_D = fn_D(x)
y_L = fn_L(x)
# print(y_D)
        diff = np.max(np.abs(y_D - y_L))
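        # NOTE: the source file is truncated here; presumably the loop asserts
        # agreement between the dandelion and lasagne outputs, e.g.:
        #     assert diff < 1e-6, 'upsample_2d result differs from Upscale2DLayer'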
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Finetuning the library models for sequence classification on GLUE-style tasks
(BERT, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa); modified for Dataset Cartography.
"""
import _jsonnet
import argparse
import glob
import json
import logging
import numpy as np
import os
import random
import shutil
import torch
from scipy.spatial import distance
from scipy.stats import entropy
from itertools import cycle
import torch.nn as nn
#label propagation
import word_level_augment
import torch.nn.functional as F
import mmd_loss
from torch.utils.data import Dataset
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertTokenizer,
RobertaConfig,
RobertaTokenizer,
get_linear_schedule_with_warmup,
)
from cartography.classification.glue_utils import adapted_glue_compute_metrics as compute_metrics
from cartography.classification.glue_utils import adapted_glue_convert_examples_to_features as convert_examples_to_features
from cartography.classification.glue_utils import glue_output_modes as output_modes
from cartography.classification.glue_utils import glue_processors as processors
from cartography.classification.diagnostics_evaluation import evaluate_by_category
from cartography.classification.models import (
AdaptedBertForMultipleChoice,
AdaptedBertForSequenceClassification,
AdaptedRobertaForMultipleChoice,
AdaptedRobertaForSequenceClassification
)
from cartography.classification.multiple_choice_utils import convert_mc_examples_to_features
from cartography.classification.params import Params, save_args_to_file
from cartography.selection.selection_utils import log_training_dynamics
from cartography.data_utils_glue import convert_string_to_unique_number
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
ALL_MODELS = sum(
(
tuple(conf.pretrained_config_archive_map.keys())
for conf in (
BertConfig,
RobertaConfig,
)
),
(),
)
MODEL_CLASSES = {
"bert": (BertConfig, AdaptedBertForSequenceClassification, BertTokenizer),
"bert_mc": (BertConfig, AdaptedBertForMultipleChoice, BertTokenizer),
"roberta": (RobertaConfig, AdaptedRobertaForSequenceClassification, RobertaTokenizer),
"roberta_mc": (RobertaConfig, AdaptedRobertaForMultipleChoice, RobertaTokenizer),
}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
class TripleDataset(torch.utils.data.Dataset):
def __init__(self, *datasets):
self.datasets = datasets
def __getitem__(self, i):
return tuple(d[i] for d in self.datasets)
def __len__(self):
return min(len(d) for d in self.datasets)
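# Example: TripleDataset(ds_a, ds_b)[i] returns (ds_a[i], ds_b[i]); it is used
# below to pair each original example with its augmented counterpart.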
def train(args, train_dataset, model, tokenizer, flag_in_training):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
# train_sampler = RandomSampler(
# train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(
train_dataset, batch_size=args.train_batch_size, shuffle=True)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (
len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0
},
]
if flag_in_training =='finetune':
optimizer = AdamW(optimizer_grouped_parameters, lr=args.finetune_learning_rate, eps=args.adam_epsilon)
else:
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True,
)
# Train!
# args.local_rank = -1
# get_world_size = 1
# args.train_batch_size = 128
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" local_rank = %d", args.local_rank)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_this_epoch = 0
# Check if continuing training from a checkpoint
if os.path.exists(args.model_name_or_path):
# set global_step to gobal_step of last saved checkpoint from model path
global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_this_epoch = global_step % (
len(train_dataloader) // args.gradient_accumulation_steps)
logger.info(f" Continuing training from checkpoint, will skip to saved global_step")
logger.info(f" Continuing training from epoch {epochs_trained}")
logger.info(f" Continuing training from global step {global_step}")
logger.info(f" Will skip the first {steps_trained_in_this_epoch} steps in the first epoch")
tr_loss, logging_loss, epoch_loss = 0.0, 0.0, 0.0
model.zero_grad()
if flag_in_training =='finetune':
train_iterator = trange(epochs_trained,
(int(args.ft_num_train_epochs)*3),
desc="Epoch",
disable=args.local_rank not in [-1, 0],
mininterval=10,
ncols=100)
else:
train_iterator = trange(epochs_trained,
int(args.num_train_epochs),
desc="Epoch",
disable=args.local_rank not in [-1, 0],
mininterval=10,
ncols=100)
set_seed(args) # Added here for reproductibility
best_dev_performance = 0
best_epoch = epochs_trained
train_acc = 0.0
total_entropy = 0.
total_sample_size = 0
for epoch, _ in enumerate(train_iterator):
epoch_iterator = tqdm(train_dataloader,
desc="Iteration",
disable=args.local_rank not in [-1, 0],
mininterval=10,
ncols=100)
train_iterator.set_description(f"train_epoch: {epoch} train_acc: {train_acc:.4f}")
train_ids = None
train_golds = None
train_logits = None
train_losses = None
for step, batch in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_this_epoch > 0:
steps_trained_in_this_epoch -= 1
continue
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
entropy=outputs[-1]
if train_logits is None: # Keep track of training dynamics.
train_ids = batch[4].detach().cpu().numpy()
train_logits = outputs[1].detach().cpu().numpy()
train_golds = inputs["labels"].detach().cpu().numpy()
train_losses = loss.detach().cpu().numpy()
train_entropy = entropy.detach().cpu().numpy()
else:
train_ids = np.append(train_ids, batch[4].detach().cpu().numpy())
train_logits = np.append(train_logits, outputs[1].detach().cpu().numpy(), axis=0)
train_golds = np.concatenate((train_golds, inputs["labels"].detach().cpu().numpy()), 0)
train_losses = np.append(train_losses, loss.detach().cpu().numpy())
train_entropy = np.append(train_entropy, entropy.detach().cpu().numpy())
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if (
args.local_rank in [-1, 0] and
args.logging_steps > 0 and
global_step % args.logging_steps == 0
):
epoch_log = {}
# Only evaluate when single GPU otherwise metrics may not average well
if args.local_rank == -1 and args.evaluate_during_training_epoch:
logger.info(f"From within the epoch at step {step}")
results, _ = evaluate(args, model, tokenizer)
for key, value in results.items():
eval_key = "eval_{}".format(key)
epoch_log[eval_key] = value
epoch_log["learning_rate"] = scheduler.get_lr()[0]
epoch_log["loss"] = (tr_loss - logging_loss) / args.logging_steps
logging_loss = tr_loss
for key, value in epoch_log.items():
tb_writer.add_scalar(key, value, global_step)
logger.info(json.dumps({**epoch_log, **{"step": global_step}}))
if (
args.local_rank in [-1, 0] and
args.save_steps > 0 and
global_step % args.save_steps == 0
):
# Save model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
epoch_iterator.set_description(f"lr = {scheduler.get_lr()[0]:.8f}, "
f"loss = {(tr_loss-epoch_loss)/(step+1):.4f}")
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
mean_entropy= np.sum(train_entropy) / np.where(train_entropy < -(np.ones_like(train_entropy) * 1e-10),
np.ones_like(train_entropy),
np.zeros_like(train_entropy)).sum()
logger.info(f"*********************************selected_questions*********************************: {mean_entropy:.4f}***")
#### Post epoch eval ####
# Only evaluate when single GPU otherwise metrics may not average well
if args.local_rank == -1 and args.evaluate_during_training:
best_dev_performance, best_epoch = save_model(
args, model, tokenizer, epoch, best_epoch, best_dev_performance)
log_training_dynamics(output_dir=args.output_dir,
epoch=epoch,
train_ids=list(train_ids),
train_logits=list(train_logits),
train_golds=list(train_golds))
train_result = compute_metrics(args.task_name, np.argmax(train_logits, axis=1), train_golds)
train_acc = train_result["acc"]
epoch_log = {"epoch": epoch,
"train_acc": train_acc,
"best_dev_performance": best_dev_performance,
"avg_batch_loss": (tr_loss - epoch_loss) / args.per_gpu_train_batch_size,
"learning_rate": scheduler.get_lr()[0],}
epoch_loss = tr_loss
logger.info(f" End of epoch : {epoch}")
with open(os.path.join(args.output_dir, f"eval_metrics_train.json"), "a") as toutfile:
toutfile.write(json.dumps(epoch_log) + "\n")
for key, value in epoch_log.items():
tb_writer.add_scalar(key, value, global_step)
logger.info(f" {key}: {value:.6f}")
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
elif args.evaluate_during_training and epoch - best_epoch >= args.patience:
logger.info(f"Ran out of patience. Best epoch was {best_epoch}. "
f"Stopping training at epoch {epoch} out of {args.num_train_epochs} epochs.")
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def interleave(x, size):
s = list(x.shape)
return x.reshape([-1, size] + s[1:]).transpose(0, 1).reshape([-1] + s[1:])
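# Example: for x of shape (6, d) and size=3, interleave reorders the rows
# [0, 1, 2, 3, 4, 5] -> [0, 3, 1, 4, 2, 5], i.e. it interleaves the two
# consecutive blocks of `size` rows (FixMatch-style batch mixing).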
def lp_train(args, train_dataset, single_dataset, single_aug_dataset, model, tokenizer, flag_in_training):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_dataloader = DataLoader(
train_dataset, batch_size=args.train_batch_size, shuffle=True)
if args.label_propagation and args.do_finetune:
single_aug= TripleDataset(single_dataset, single_aug_dataset)
single_train_dataloader = DataLoader(
single_aug, batch_size=args.train_batch_size, shuffle=True)
if args.max_steps > 0:
t_total = args.max_steps
args.ft_num_train_epochs = args.max_steps // (
len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.ft_num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0
},
]
if flag_in_training =='finetune':
optimizer = AdamW(optimizer_grouped_parameters, lr=args.finetune_learning_rate, eps=args.adam_epsilon)
else:
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
for param_group in optimizer.param_groups:
            param_group['lr'] = args.finetune_learning_rate
# scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True,
)
# Train!
# args.local_rank = -1
# get_world_size = 1
# args.train_batch_size = 128
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.ft_num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" local_rank = %d", args.local_rank)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_this_epoch = 0
# Check if continuing training from a checkpoint
if os.path.exists(args.model_name_or_path):
        # set global_step to global_step of last saved checkpoint from model path
global_step = 0
# global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_this_epoch = global_step % (
len(train_dataloader) // args.gradient_accumulation_steps)
logger.info(f" Continuing training from checkpoint, will skip to saved global_step")
logger.info(f" Continuing training from epoch {epochs_trained}")
logger.info(f" Continuing training from global step {global_step}")
logger.info(f" Will skip the first {steps_trained_in_this_epoch} steps in the first epoch")
tr_loss, logging_loss, epoch_loss = 0.0, 0.0, 0.0
model.zero_grad()
if flag_in_training =='finetune':
train_iterator = trange(epochs_trained,
int(args.ft_num_train_epochs),
desc="Epoch",
disable=args.local_rank not in [-1, 0],
mininterval=10,
ncols=100)
else:
train_iterator = trange(epochs_trained,
int(args.num_train_epochs),
desc="Epoch",
disable=args.local_rank not in [-1, 0],
mininterval=10,
ncols=100)
    set_seed(args)  # Added here for reproducibility
best_dev_performance = 0
best_epoch = epochs_trained
train_acc = 0.0
total_entropy = 0.
total_sample_size = 0
single_iter = iter(single_train_dataloader)
for epoch, _ in enumerate(train_iterator):
epoch_iterator = tqdm(train_dataloader,
desc="Iteration",
disable=args.local_rank not in [-1, 0],
mininterval=10,
ncols=100)
train_iterator.set_description(f"train_epoch: {epoch} train_acc: {train_acc:.4f}")
train_ids = None
train_golds = None
train_logits = None
train_losses = None
for step, batch in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_this_epoch > 0:
steps_trained_in_this_epoch -= 1
continue
model.train()
            try:
                inputs_u_w, inputs_u_s = next(single_iter)
            except StopIteration:
                single_iter = iter(single_train_dataloader)
                inputs_u_w, inputs_u_s = next(single_iter)
batch = tuple(t.to(args.device) for t in batch)
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.label_propagation and args.do_finetune:
# import pdb
# pdb.set_trace()
batch_single = tuple(t.to(args.device) for t in inputs_u_w)
inputs_single = {"input_ids": batch_single[0], "attention_mask": batch_single[1], "labels": batch_single[3]}
if args.model_type != "distilbert":
inputs_single["token_type_ids"] = (
batch_single[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
batch_single_aug = tuple(t.to(args.device) for t in inputs_u_s)
inputs_single_aug = {"input_ids": batch_single_aug[0], "attention_mask": batch_single_aug[1], "labels": batch_single_aug[3]}
if args.model_type != "distilbert":
inputs_single_aug["token_type_ids"] = (
batch_single_aug[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
multi_before_softmax = outputs[1]
single_outputs = model(**inputs_single)
alpha = 1. # 1., 0.5 is enough
lsmooth_tau = 0.6
lamb = np.random.beta(alpha, alpha)
index = min(multi_before_softmax.shape[0], single_outputs[1].shape[0])
print(multi_before_softmax.size(), 'multi_before')
print(single_outputs[1].size(), 'single_output')
mixup_feature = multi_before_softmax[:index] * lamb + single_outputs[1][:index] * (1 - lamb)
# soft label v2
soft_label_tau = .75
soft_label = torch.softmax(single_outputs[1][:index], dim=-1) * soft_label_tau \
+ inputs_single['labels'][:index] * (1 - soft_label_tau)
# single+single
shuffle_index = torch.randperm(index).long().cuda()
single_mixup_feature = single_outputs[1][:index][shuffle_index] * lamb + single_outputs[1][:index] * (
1 - lamb)
single_mixup_label = inputs_single['labels'][:index][shuffle_index] * lamb + inputs_single['labels'][:index] * (1 - lamb)
single_loss = nn.KLDivLoss()(F.log_softmax(single_mixup_feature, dim=-1), single_mixup_label)
#multi+multi
mul_shuffle_index = torch.randperm(index).long().cuda()
multi_mixup_feature = outputs[1][:index][mul_shuffle_index] * lamb + outputs[1][:index] * (
1 - lamb)
multi_mixup_label = inputs['labels'][:index][mul_shuffle_index] * lamb + inputs['labels'][:index] * (1 - lamb)
multi_loss = nn.KLDivLoss()(F.log_softmax(multi_mixup_feature, dim=-1),
multi_mixup_label)
mixup_loss = nn.KLDivLoss()(F.log_softmax(mixup_feature, dim=-1), inputs['labels'][:index] * lamb + soft_label* (1. - lamb))
jsd_mixup_loss = JSDLossSoft()(F.log_softmax(mixup_feature, dim=-1), inputs['labels'][:index]) * lamb \
+ JSDLossSoft()(F.log_softmax(mixup_feature, dim=-1),
inputs_single['labels'][:index] * lsmooth_tau
+ (1. - lsmooth_tau) * torch.ones_like(inputs_single['labels'][:index])) \
* (1. - lamb)
print(mixup_loss.item(), jsd_mixup_loss.item(), "mixup_loss.item(), jsd_mixup_loss.item()")
reg_loss = mixup_loss * 1. + jsd_mixup_loss * 0. # check .1 or not
print(loss.mean().item(), 'original_loss')
print(reg_loss, 'reg_loss')
loss = multi_loss #+ reg_loss + single_loss
entropy=outputs[-1]
if train_logits is None: # Keep track of training dynamics.
train_ids = batch[4].detach().cpu().numpy()
train_logits = outputs[1].detach().cpu().numpy()
train_golds = inputs["labels"].detach().cpu().numpy()
train_losses = loss.detach().cpu().numpy()
train_entropy = entropy.detach().cpu().numpy()
print(entropy.size(), "check entropy size")
else:
train_ids = np.append(train_ids, batch[4].detach().cpu().numpy())
train_logits = np.append(train_logits, outputs[1].detach().cpu().numpy(), axis=0)
train_golds = np.concatenate((train_golds, inputs["labels"].detach().cpu().numpy()), 0)
train_losses = np.append(train_losses, loss.detach().cpu().numpy())
train_entropy = np.append(train_entropy, entropy.detach().cpu().numpy())
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if (
args.local_rank in [-1, 0] and
args.logging_steps > 0 and
global_step % args.logging_steps == 0
):
epoch_log = {}
# Only evaluate when single GPU otherwise metrics may not average well
if args.local_rank == -1 and args.evaluate_during_training_epoch:
logger.info(f"From within the epoch at step {step}")
results, _ = evaluate(args, model, tokenizer)
for key, value in results.items():
eval_key = "eval_{}".format(key)
epoch_log[eval_key] = value
epoch_log["learning_rate"] = scheduler.get_lr()[0]
epoch_log["loss"] = (tr_loss - logging_loss) / args.logging_steps
logging_loss = tr_loss
for key, value in epoch_log.items():
tb_writer.add_scalar(key, value, global_step)
logger.info(json.dumps({**epoch_log, **{"step": global_step}}))
if (
args.local_rank in [-1, 0] and
args.save_steps > 0 and
global_step % args.save_steps == 0
):
# Save model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
epoch_iterator.set_description(f"lr = {scheduler.get_lr()[0]:.8f}, "
f"loss = {(tr_loss-epoch_loss)/(step+1):.4f}")
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
# mean_entropy = total_entropy / total_sample_size
        mean_entropy = np.sum(train_entropy) / (train_entropy < -1e-10).sum()
        logger.info(f"***** mean entropy over selected questions: {mean_entropy:.4f} *****")
#### Post epoch eval ####
# Only evaluate when single GPU otherwise metrics may not average well
if args.local_rank == -1 and args.evaluate_during_training:
best_dev_performance, best_epoch = save_model(
args, model, tokenizer, epoch, best_epoch, best_dev_performance)
log_training_dynamics(output_dir=args.output_dir,
epoch=epoch,
train_ids=list(train_ids),
train_logits=list(train_logits),
train_golds=list(train_golds))
train_result = compute_metrics(args.task_name, np.argmax(train_logits, axis=1), train_golds)
train_acc = train_result["acc"]
epoch_log = {"epoch": epoch,
"train_acc": train_acc,
"best_dev_performance": best_dev_performance,
"avg_batch_loss": (tr_loss - epoch_loss) / args.per_gpu_train_batch_size,
"learning_rate": scheduler.get_lr()[0],}
epoch_loss = tr_loss
logger.info(f" End of epoch : {epoch}")
with open(os.path.join(args.output_dir, f"eval_metrics_train.json"), "a") as toutfile:
toutfile.write(json.dumps(epoch_log) + "\n")
for key, value in epoch_log.items():
tb_writer.add_scalar(key, value, global_step)
logger.info(f" {key}: {value:.6f}")
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
elif args.evaluate_during_training and epoch - best_epoch >= args.patience:
logger.info(f"Ran out of patience. Best epoch was {best_epoch}. "
f"Stopping training at epoch {epoch} out of {args.ft_num_train_epochs} epochs.")
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def save_model(args, model, tokenizer, epoch, best_epoch, best_dev_performance):
results, _ = evaluate(args, model, tokenizer, prefix="in_training")
# TODO(SS): change hard coding `acc` as the desired metric, might not work for all tasks.
desired_metric = "acc"
dev_performance = results.get(desired_metric)
# if dev_performance > best_dev_performance:
if True:
best_epoch = epoch
best_dev_performance = dev_performance
# Save model checkpoint
# Take care of distributed/parallel training
model_to_save = (model.module if hasattr(model, "module") else model)
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
logger.info(f"*** Found BEST model, and saved checkpoint. "
f"BEST dev performance : {dev_performance:.4f} ***")
return best_dev_performance, best_epoch
# Jensen-Shannon divergence (uses scipy.stats.entropy for the KL terms)
def JSD(P, Q):
    M = 0.5 * (P + Q)
    # print('entropy', entropy(P, M), P, M)
    return 0.5 * (entropy(P, M) + entropy(Q, M))
# Same JSD computed with torch.nn.functional.kl_div (note: torch's default
# 'mean' reduction averages over elements rather than summing, so this
# returns the per-element mean rather than the summed divergence)
def JSD_2(P, Q):
    P = np.array(P, dtype=float)
    Q = np.array(Q, dtype=float)
    M = 0.5 * (P + Q)
    log_M = torch.log(torch.from_numpy(M))
    _jsd = 0.5 * (torch.nn.functional.kl_div(log_M, torch.from_numpy(P)).numpy()
                  + torch.nn.functional.kl_div(log_M, torch.from_numpy(Q)).numpy())
    return _jsd
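# Hedged sanity check (assumes `entropy` above is scipy.stats.entropy):
#   P, Q = np.array([0.9, 0.1]), np.array([0.1, 0.9])
#   JSD(P, Q)    # ~0.368 nats; JSD is bounded above by ln 2 ~ 0.693
#   JSD_2(P, Q)  # ~JSD(P, Q) / len(P) because of the 'mean' reduction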
def evaluate(args, model, tokenizer, prefix="", eval_split="dev"):
eval_task_names = (args.task_name,)
eval_outputs_dirs = (args.output_dir,)
results = {}
all_predictions = {}
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
eval_dataset, pair_id = eval_load_and_cache_examples(
args, eval_task, tokenizer, evaluate=True, data_split=f"{eval_split}_{prefix}")
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(
eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu eval
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model)
# Eval!
logger.info(f"***** Running {eval_task} {prefix} evaluation on {eval_split} *****")
logger.info(f" Num examples = {len(eval_dataset)}")
logger.info(f" Batch size = {args.eval_batch_size}")
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
example_ids = []
gold_labels = []
for batch in tqdm(eval_dataloader, desc="Evaluating", mininterval=10, ncols=100):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
example_ids += batch[4].tolist()
gold_labels += batch[3].tolist()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(
out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == "classification":
probs = torch.nn.functional.softmax(torch.Tensor(preds), dim=-1)
if args.do_temperature:
probs = torch.nn.functional.softmax(torch.Tensor(preds)/1.75, dim=-1)
max_confidences = (torch.max(probs, dim=-1)[0]).tolist()
preds = np.argmax(preds, axis=1) # Max of logit is the same as max of probability.
elif args.output_mode == "regression":
preds = np.squeeze(preds)
result = compute_metrics(eval_task, preds, out_label_ids)
# order: [E, N, C]
results.update(result)
output_eval_file = os.path.join(
eval_output_dir, f"eval_metrics_{eval_task}_{eval_split}_{prefix}.json")
logger.info(f"***** {eval_task} {eval_split} results {prefix} *****")
for key in sorted(result.keys()):
logger.info(f"{eval_task} {eval_split} {prefix} {key} = {result[key]:.4f}")
with open(output_eval_file, "a") as writer:
writer.write(json.dumps(results) + "\n")
# predictions
all_predictions[eval_task] = []
output_pred_file = os.path.join(
eval_output_dir, f"predictions_{eval_task}_{eval_split}_{prefix}.lst")
with open(output_pred_file, "w") as writer:
logger.info(f"***** Write {eval_task} {eval_split} predictions {prefix} *****")
for ex_id, pred, gold, max_conf, prob in zip(
example_ids, preds, gold_labels, max_confidences, probs.tolist()):
# print(pred, prob, gold);input()
# print('gold_label', processors[args.task_name]().get_labels()[int(max(gold))])
record = {"guid": ex_id,
"label": processors[args.task_name]().get_labels()[pred],
"gold": processors[args.task_name]().get_labels()[int( | np.argmax(gold) | numpy.argmax |
import numpy as np
import torch
import imageio
import os
import torch.utils.data as data
def InfiniteSampler(n):
"""Data sampler"""
i = n - 1
order = np.random.permutation(n)
while True:
yield order[i]
i += 1
if i >= n:
np.random.seed()
order = np.random.permutation(n)
i = 0
class InfiniteSamplerWrapper(data.sampler.Sampler):
"""Data sampler wrapper"""
def __init__(self, data_source):
self.num_samples = len(data_source)
def __iter__(self):
return iter(InfiniteSampler(self.num_samples))
def __len__(self):
return 2 ** 15
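# Hedged usage sketch (not part of the original file): the wrapper plugs into a
# DataLoader so iteration never raises StopIteration, which suits GAN-style
# loops that call next() an arbitrary number of times:
#
#   loader = iter(data.DataLoader(dataset, batch_size=8,
#                                 sampler=InfiniteSamplerWrapper(dataset),
#                                 num_workers=2))
#   batch = next(loader)  # safe to call indefinitely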
def get_nsamples(data_loader, N):
x = []
n = 0
while n < N:
x_next = next(data_loader)
x_next = x_next.cuda(non_blocking=True)
x.append(x_next)
n += x_next.size(0)
x = torch.cat(x, dim=0)[:N]
return x
def count_trainable_parameters(model):
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
return sum([np.prod(p.size()) for p in model_parameters])
def save_video(imgs, fname, as_gif=False, fps=24, quality=8):
# convert to np.uint8
imgs = (255 * np.clip(imgs.permute(0, 2, 3, 1).detach().cpu().numpy() / 2 + 0.5, 0, 1)).astype(np.uint8)
imageio.mimwrite(fname, imgs, fps=fps, quality=quality)
if as_gif: # save as gif, too
os.system(f'ffmpeg -i {fname} -r 15 '
f'-vf "scale=512:-1,split[s0][s1];[s0]palettegen[p];[s1][p]paletteuse" {os.path.splitext(fname)[0] + ".gif"}')
def color_depth_map(depths, scale=None):
"""
Color an input depth map.
Arguments:
depths -- HxW numpy array of depths
[scale=None] -- scaling the values (defaults to the maximum depth)
Returns:
colored_depths -- HxWx3 numpy array visualizing the depths
"""
_color_map_depths = np.array([
[0, 0, 0], # 0.000
[0, 0, 255], # 0.114
[255, 0, 0], # 0.299
[255, 0, 255], # 0.413
[0, 255, 0], # 0.587
[0, 255, 255], # 0.701
[255, 255, 0], # 0.886
[255, 255, 255], # 1.000
[255, 255, 255], # 1.000
]).astype(float)
_color_map_bincenters = np.array([
0.0,
0.114,
0.299,
0.413,
0.587,
0.701,
0.886,
1.000,
2.000, # doesn't make a difference, just strictly higher than 1
])
if scale is None:
scale = depths.max()
values = np.clip(depths.flatten() / scale, 0, 1)
# for each value, figure out where they fit in in the bincenters: what is the last bincenter smaller than this value?
lower_bin = ((values.reshape(-1, 1) >= _color_map_bincenters.reshape(1, -1)) * np.arange(0, 9)).max(axis=1)
lower_bin_value = _color_map_bincenters[lower_bin]
higher_bin_value = _color_map_bincenters[lower_bin + 1]
alphas = (values - lower_bin_value) / (higher_bin_value - lower_bin_value)
colors = _color_map_depths[lower_bin] * (1 - alphas).reshape(-1, 1) + _color_map_depths[
lower_bin + 1] * alphas.reshape(-1, 1)
return colors.reshape(depths.shape[0], depths.shape[1], 3).astype(np.uint8)
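# Hedged usage sketch: color a synthetic depth map; the file name below is
# illustrative only.
#
#   depths = np.random.rand(64, 64) * 10.0    # HxW depths
#   colored = color_depth_map(depths)         # HxWx3 uint8
#   imageio.imwrite('depth_vis.png', colored)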
# Virtual camera utils
def to_sphere(u, v):
theta = 2 * np.pi * u
    phi = np.arccos(1 - 2 * v)
# See info about the sl2 file format at:
# * https://wiki.openstreetmap.org/wiki/SL2
# * https://github.com/kmpm/node-sl2format/blob/master/doc/sl2fileformat.md
# * https://www.geotech1.com/forums/showthread.php?11159-Lowrance-MCC-saved-data-structure
# * https://github.com/Chris78/sl2decode/blob/master/sl2decode.rb
# * https://github.com/Chris78/sl2decode
#
# Also has sl2+sl3 and extra comments about the field meanings, includes encoder as well:
# https://github.com/risty/SonarLogApi/tree/master/SonarLogAPI/Lowrance
#
# Note: I think all info that appears public about the "flags" field is wrong.
# None of the decoders make use of the flags and I saw little correlation between
# the flags and changes in associated data. There are however some correlations
# like when the GPS values change that appear to correlate with a few flags etc
import os
import os.path
import glob
import struct
import math
import re
import numpy
import pandas
import dask
import dask.dataframe
import xarray
import json
import datetime
import pytz
import tzlocal
import logging
logger = logging.getLogger('lowrance_log_parser')
CACHE_VERSION = 3
PARAQUET_ENGINE='pyarrow'
# Max items in a single pandas.DataFrame, we split and use dask to handle them
MAX_SEGMENT_SIZE = 5 * 1024
# Max bytes of data we expect to see in the sonar data segment
SONAR_DATA_MAX_SIZE = 3072
FORMAT_SLG = 1
FORMAT_SL2 = 2
FORMAT_SL3 = 3
FORMAT_MAP = {
1: 'slg',
2: 'sl2',
3: 'sl3',
}
def FormatToStr(format):
if format not in FORMAT_MAP:
return 'UNK(' + str(format) + ')'
return FORMAT_MAP[format]
FAMILY_MAP = {
0: 'HDS 7',
1: 'Elite 4 CHIRP',
}
def FamilyToStr(family):
if family not in FAMILY_MAP:
return 'UNK(' + str(family) + ')'
return FAMILY_MAP[family]
# Traditional Sonar
PRIMARY = 0
# Traditional Sonar
SECONDARY = 1
# DownScan Imaging
DOWNSCAN = 2
LEFT_SIDESCAN = 3
RIGHT_SIDESCAN = 4
COMPOSITE_SIDESCAN = 5
# 7,8 found in yens new HDS Live sonar log.
# 7 looks like a normal sonar or downscan or something
# but has 2x samples for each frame index
#
# 8 looks like some kinds of other non-image data
# maybe not even uint8 also has 2x samples for each frame index
# like channel 7
#
# All other channels have 1 sample per frame index so it is weird
THREE_DIMENSIONAL = 9
def ChannelToStr(id):
if id == PRIMARY: return 'primary'
elif id == SECONDARY: return 'secondary'
elif id == DOWNSCAN: return 'downscan'
elif id == LEFT_SIDESCAN: return 'left_sidescan'
elif id == RIGHT_SIDESCAN: return 'right_sidescan'
elif id == COMPOSITE_SIDESCAN: return 'composite_sidescan'
else: return 'unknown' + str(id)
#Sonar transducer frequency
FREQUENCY_MAP = {
0 : '200 KHz',
1 : '50 KHz',
2 : '83 KHz',
3 : '455 KHz',
4 : '800 KHz',
5 : '38 KHz',
6 : '28 KHz',
7 : '130 KHz - 210 KHz',
8 : '90 KHz - 150 KHz',
9 : '40 KHz - 60 KHz',
10 : '25 KHz - 45 KHz',
# Any other value is treated like 200 KHz
}
def FrequencyToStr(freq):
if freq not in FREQUENCY_MAP:
#logger.warning('Invalid frequency input: %s assuming 200 KHz', freq)
        return 'Unknown frequency value %s, assuming 200 KHz' % (freq)
return FREQUENCY_MAP[freq]
# Included both forward/reverse because still trying to figure out
# what the flags mean. The public docs I saw didnt seem to match up
# with my sl2 files.
# From: https://github.com/risty/SonarLogApi/blob/master/SonarLogAPI/Lowrance/Frame.cs
#TrackValid = 0,
#Preset_0_1 = 1,
#Unknown0_2 = 2,
#PositionValid = 3,
#Unknown_0_4 = 4,
#CourseOrSpeed_0_5 = 5,
#SpeedValid = 6,
#Preset_0_7 = 7,
#Unknown1_0 = 8,
#AltitudeOrCourseOrSpeed_1_1 = 9,
#Unknown1_2 = 10,
#Unknown1_3 = 11,
#Unknown1_4 = 12,
#Unknown1_5 = 13,
#AltitudeValid = 14,
#HeadingValid = 15
FLAGS_MAP_FORWARD = {
'course_over_ground_valid' : 1 << 15,
'speed_water_valid' : 1 << 14,
'flag_unknown01' : 1 << 13,
'position_valid' : 1 << 12,
'flag_unknown02' : 1 << 11,
'temperature_valid' : 1 << 10,
'speed_gps_valid' : 1 << 9,
'flag_unknown03' : 1 << 8,
'flag_unknown04' : 1 << 7,
'flag_unknown05' : 1 << 6,
'flag_unknown06' : 1 << 5,
'flag_unknown07' : 1 << 4,
'flag_unknown08' : 1 << 3,
'flag_unknown09' : 1 << 2,
'altitude_valid' : 1 << 1,
'heading_valid' : 1 << 0,
}
FLAGS_MAP_REVERSE = {
'course_over_ground_valid' : 1 << 0,
'speed_water_valid' : 1 << 1,
'flag_unknown01' : 1 << 2,
'position_valid' : 1 << 3,
'flag_unknown02' : 1 << 4,
'temperature_valid' : 1 << 5,
'speed_gps_valid' : 1 << 6,
'flag_unknown03' : 1 << 7,
'flag_unknown04' : 1 << 8,
'flag_unknown05' : 1 << 9,
'flag_unknown06' : 1 << 10,
'flag_unknown07' : 1 << 11,
'flag_unknown08' : 1 << 12,
'flag_unknown09' : 1 << 13,
'altitude_valid' : 1 << 14,
'heading_valid' : 1 << 15,
}
# The settings below are used to help try and figure out /reverse engineer the purpose
# of the flags item in the sl2 file by correlating changes in data with
# true/false values in the flags.
#
# Other reverse engineering summaries say these flags mean XYZ_valid I.e.
# when true we expect updates to the data for XYZ. So for example
# position_valid indicates there should be valid data in the longitude/latitude
# fields. I have also noticed that most data fields tend to maintain previous values
# until updated. So you might get a speed_gps of 2km/h and it stays exactly the
# same value for a few data samples, then it changes to something else.
#
# I was expecting thus that the speed_gps_valid flag would be set to true
# when we see this change in the speed_gps value.
#
# Likewise more importantly, I should NEVER see the data for speed_gps change
# while speed_gps_valid = false.
#
# It turns out these assumptions are not correct which leads me to believe
# that these flags have different meanings than previously described
# Choose the bitmask allocation of names to bit
#FLAGS_MAP = FLAGS_MAP_FORWARD
FLAGS_MAP = FLAGS_MAP_REVERSE
def FlagsToStr(flags, sep=','):
s = []
for k,v in FLAGS_MAP.items():
if v & flags: s.append(k + '=true')
else: s.append(k + '=false')
return sep.join(s)
def FeetToMeters(value): return value / 3.2808399
SECONDS_PER_MINUTE = 60
MINUTES_PER_HOUR = 60
def KnotsToKmph(value): return (value / 1.94385) / 1000 * SECONDS_PER_MINUTE * MINUTES_PER_HOUR
def RadiansToDegrees(rad): return rad * 180.0 / math.pi
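# Quick unit sanity checks (hedged, comment-only):
#   FeetToMeters(3.2808399)   -> 1.0 m
#   KnotsToKmph(1.0)          -> ~1.852 km/h (1 knot = 1852 m/h)
#   RadiansToDegrees(math.pi) -> 180.0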
# Copied from datashader.utils, importing datashader is very slow so we duplicate this here
# as we dont care about the rest of it
def lnglat_to_meters(longitude, latitude):
"""
Projects the given (longitude, latitude) values into Web Mercator
coordinates (meters East of Greenwich and meters North of the Equator).
Longitude and latitude can be provided as scalars, Pandas columns,
or Numpy arrays, and will be returned in the same form. Lists
or tuples will be converted to Numpy arrays.
Examples:
easting, northing = lnglat_to_meters(-40.71,74)
easting, northing = lnglat_to_meters(np.array([-74]),np.array([40.71]))
df=pandas.DataFrame(dict(longitude=np.array([-74]),latitude=np.array([40.71])))
df.loc[:, 'longitude'], df.loc[:, 'latitude'] = lnglat_to_meters(df.longitude,df.latitude)
"""
if isinstance(longitude, (list, tuple)):
longitude = numpy.array(longitude)
if isinstance(latitude, (list, tuple)):
latitude = numpy.array(latitude)
origin_shift = numpy.pi * 6378137
easting = longitude * origin_shift / 180.0
northing = numpy.log(numpy.tan((90 + latitude) * numpy.pi / 360.0)) * origin_shift / numpy.pi
return (easting, northing)
# Implemented just doing reverse of lnglat_to_meters
def meters_to_lnglat(easting, northing):
if isinstance(easting, (list, tuple)):
easting = numpy.array(easting)
if isinstance(northing, (list, tuple)):
northing = numpy.array(northing)
origin_shift = numpy.pi * 6378137
longitude = easting / origin_shift * 180.0
latitude = (numpy.arctan(numpy.exp(northing / origin_shift * numpy.pi)) / numpy.pi * 360.0) - 90.0
return (longitude, latitude)
# SL2 format stores Easting and Northing coordinates in Spherical Mercator Projection,
# using WGS84 POLAR Earth radius
#
# OpenStreetMap and Google instead use the WGS84 EQUATORIAL Earth Radius
# So we will convert and use the more popular format
POLAR_EARTH_RADIUS = 6356752.3142;
# https://www.movable-type.co.uk/scripts/latlong-utm-mgrs.html
# A Universal Transverse Mercator coordinate comprises a zone number, a hemisphere (N/S), an easting and a northing.
# Eastings are referenced from the central meridian of each zone, & northings from the equator, both in metres.
# To avoid negative numbers, ‘false eastings’ and ‘false northings’ are used:
# Eastings are measured from 500,000 metres west of the central meridian. Eastings (at the equator) range from 166,021m to 833,978m (the range decreases moving away from the equator); a point on the the central meridian has the value 500,000m.
#
#In the northern hemisphere, northings are measured from the equator – ranging from 0 at the equator to 9,329,005m at 84°N). In the southern hemisphere they are measured from 10,000,000 metres south of the equator (close to the pole) – ranging from 1,116,915m at 80°S to 10,000,000m at the equator.
def UniversalTransverseMercatorToWGS84EquatorialLongitude(polar_longitude):
plon = int(polar_longitude)
equatorial_longitude = plon / POLAR_EARTH_RADIUS * (180.0 / math.pi)
return equatorial_longitude
# From: https://github.com/Chris78/sl2decode/blob/master/sl2decode.rb
# https://github.com/Chris78/sl2decode
UINT32_MAX = 0xffffffff # 4294967295U
def UniversalTransverseMercatorToWGS84EquatorialLatitude(polar_latitude, is_northern_hemisphere=False):
plat = int(polar_latitude)
if not is_northern_hemisphere:
plat = plat - UINT32_MAX
temp = plat / POLAR_EARTH_RADIUS
temp = math.exp(temp)
temp = (2 * math.atan(temp)) - (math.pi / 2)
equatorial_latitude = temp * (180/math.pi)
# @todo Horrible hack, I need to understand this better so we can do the
# correct thing not just copy from others something that only worked for the northern hemisphere
if equatorial_latitude == -90.0 and not is_northern_hemisphere:
return UniversalTransverseMercatorToWGS84EquatorialLatitude(polar_latitude, True)
return equatorial_latitude
# Copied from datashader.utils, importing datashader is very slow so we duplicate this here
# as we dont care about the rest of it
def LowranceLongitudeToWebMercator(longitude):
longitude = UniversalTransverseMercatorToWGS84EquatorialLongitude(longitude)
origin_shift = numpy.pi * 6378137
easting = longitude * origin_shift / 180.0
return easting
def LowranceLatitudeToWebMercator(latitude):
latitude = UniversalTransverseMercatorToWGS84EquatorialLatitude(latitude)
origin_shift = numpy.pi * 6378137
northing = numpy.log(numpy.tan((90 + latitude) * numpy.pi / 360.0)) * origin_shift / numpy.pi
return northing
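# Hedged round-trip check: lnglat_to_meters() and meters_to_lnglat() above are
# exact inverses, which the Lowrance converters rely on for targeting the same
# Web Mercator frame:
#
#   easting, northing = lnglat_to_meters(151.2093, -33.8688)   # Sydney
#   lon, lat = meters_to_lnglat(easting, northing)
#   assert abs(lon - 151.2093) < 1e-6 and abs(lat + 33.8688) < 1e-6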
#Ideally I would like to define the parsing in a single location/definition that we use and is fast
#Something like:
class Field:
def __init__(self, offset, struct_format, name, dtype, attrs={}, conversion=None, description='', notes=''):
self.offset = offset
self.struct_format = struct_format
self.name = name
self.conversion = conversion
self.dtype = None
if dtype is not None:
self.dtype = numpy.dtype(dtype)
self.attrs = attrs
self.description = description
self.notes = notes
if self.conversion is None:
self.conversion = lambda a : a
def GetIfNone(f, inp, name):
if inp is not None:
return inp
return getattr(f, name)
def FieldCopy(src, offset, name, struct_format=None, dtype=None, attrs=None, conversion=None, description=None, notes=None):
for f in src:
if f.name == name:
struct_format = GetIfNone(f, struct_format, 'struct_format')
dtype = GetIfNone(f, dtype, 'dtype')
attrs = GetIfNone(f, attrs, 'attrs')
conversion = GetIfNone(f, conversion, 'conversion')
description = GetIfNone(f, description, 'description')
notes = GetIfNone(f, notes, 'notes')
return Field(offset, struct_format, name, dtype, attrs, conversion, description, notes)
raise Exception('Failed to copy as no field matching name: %s' % (name))
MSEC_TO_NSEC = numpy.uint64(1000000)
def ToDatetime64ns(arg):
# Is in milliseconds (except the first value)
return (numpy.uint64(arg) * MSEC_TO_NSEC).astype('datetime64[ns]')
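# Hedged example: the first frame stores a Unix timestamp in milliseconds, so
#   ToDatetime64ns(1_600_000_000_000)  # -> numpy.datetime64('2020-09-13T12:26:40')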
slX_header_parser = [
Field(0, 'H', 'format', 'uint16', description='Lowrance sonar log file format', notes='One of 1:slg, 2:sl2, 3:sl3'),
# Not sure what options exist and mean here. So far have seen:
# 0 = HDS 7 (Not sure where i got this info from)
# 1 = Elite 5 CHIRP (My unit reports this, not sure what others do)
# 2 = HDS Live 7 (Yens unit reports this, not sure what others do)
Field(2, 'H', 'family', 'uint16', description='Lowrance product hardware version'),
# Not sure about this. So far have seen:
# 1970=Downscan #b207
# 3200=Sidescan #800c
Field(4, 'H', 'block_size', 'uint16', description='Sonar type or blocksize'),
# Supposed to be always 0 according to sl2, but in sl3 logs I have seen it has value of 1
#Field(6, 'H', 'reserved', 'uint16'),
# Mark the end of the fixed length struct without requiring a value decoded from it
Field(8, None, None, None)
]
sl2_block_parser = [
# FrameOffset = 0, //int, 4 bytes : https://github.com/risty/SonarLogApi/tree/master/SonarLogAPI/Lowrance
# Used sometimes to detect we are in a valid offset for decoding
# The last block in some log files (when sonar crashed) this has all zeros
Field(0, 'I', 'block_offset', 'uint32', description='The offset in bytes from start of file this block starts'),
#LastPrimaryChannelFrameOffset = 4, //int, 4 bytes : https://github.com/risty/SonarLogApi/tree/master/SonarLogAPI/Lowrance
#last_primary_channel_block_offset
#LastSecondaryChannelFrameOffset = 8, //int, 4 bytes : https://github.com/risty/SonarLogApi/tree/master/SonarLogAPI/Lowrance
#last_secondary_channel_block_offset
#LastDownScanChannelFrameOffset = 12, //int, 4 bytes : https://github.com/risty/SonarLogApi/tree/master/SonarLogAPI/Lowrance
#last_downscan_channel_block_offset
#LastSidescanLeftChannelFrameOffset = 16, //int, 4 bytes : https://github.com/risty/SonarLogApi/tree/master/SonarLogAPI/Lowrance
#last_left_sidescan_channel_block_offset
#LastSidescanRightChannelFrameOffset = 20, //int, 4 bytes : https://github.com/risty/SonarLogApi/tree/master/SonarLogAPI/Lowrance
#last_right_sidescan_channel_block_offset
#LastSidescanCompositeChannelFrameOffset = 24, //int, 4 bytes : https://github.com/risty/SonarLogApi/tree/master/SonarLogAPI/Lowrance
#last_composite_sidescan_channel_block_offset
#ThisFrameSize = 28, //short, 2 bytes. Bytes to next frame Start : https://github.com/risty/SonarLogApi/tree/master/SonarLogAPI/Lowrance
Field(28, 'H', 'current_block_bytes', 'uint16', description=''),
#PreviousFrameSize = 30, //short, 2 bytes. Bytes to previous frame Start
#last_block_bytes
#ChannelType = 32,//short, 2 bytes
Field(32, 'H', 'channel', 'uint16', description='Identifies type of sonar data like primary, side-scan, downscan etc'),
#PacketSize = 34,//short, 2 bytes : https://github.com/risty/SonarLogApi/tree/master/SonarLogAPI/Lowrance
Field(34, 'H', 'data_size', 'uint16', description='Size of sounding/bounce data'),
#FrameIndex = 36,//int, 4 bytes
Field(36, 'I', 'frame_index', 'uint32', description='Starts at 0. Used to match frames/block on different channels.'),
#UpperLimit = 40,//float, 4 bytes
Field(40, 'f', 'upper_limit', 'float32', {'units': 'meters'}, FeetToMeters, description=''),
#LowerLimit = 44,//float, 4 bytes
Field(44, 'f', 'lower_limit', 'float32', {'units': 'meters'}, FeetToMeters, description=''),
#Frequency = 50,//byte
Field(50, 'B', 'sonar_frequency', 'uint8', description='Sonar frequency'),
#CreationDataTime = 60,//int, 4 bytes,
# value in first frame = Unix time stamp of file creation.
# other frames - time from device boot.
Field(60, 'I', 'datetime', 'datetime64[ns]', {}, ToDatetime64ns, description='Unix timestamp of file creation'),
# Depth = 64,//float, 4 bytes
Field(64, 'f', 'water_depth', 'float32', {'units': 'meters'}, FeetToMeters, description='Depth under water surface(transponder)'),
# Depth under keel
# KeelDepth = 68,//float, 4 bytes
#SpeedGps = 100,//float, 4 bytes
    # @todo Validate the speed units: one source says m/s, another knots; here we assume knots and convert to km/h.
Field(100, 'f', 'speed_gps', 'float32', {'units': 'km/h'}, KnotsToKmph, description='Speed from GPS in km/h'),
#Temperature = 104,//float, 4 bytes
Field(104, 'f', 'temperature', 'float32', {'units': 'celsius'}, description='Temperature, in Celsius'),
# IntLongitude = 108,//int, 4 bytes
Field(108, 'I', 'longitude', 'float32', {'units': 'meters', 'coordinate system': 'Web Mercator'}, LowranceLongitudeToWebMercator, description=''),
# IntLatitude = 112,//int, 4 bytes
Field(112, 'I', 'latitude', 'float32', {'units': 'meters', 'coordinate system': 'Web Mercator'}, LowranceLatitudeToWebMercator, description=''),
# If such a sensor is not present, it takes the value from "Speed" (GPS NMEA data)
# and sets WaterSpeedValid to false.
    #WaterSpeed = 116,//float, 4 bytes
    Field(116, 'f', 'speed_water', 'float32', {'units': 'km/h'}, KnotsToKmph, description='Water speed in km/h, converted from knots. Taken from an actual water-speed sensor (such as a paddle-wheel) when present.'),
#CourseOverGround = 120,//float, 4 bytes
Field(120, 'f', 'course_over_ground', 'float32', {'units': 'degrees'}, RadiansToDegrees, description='Track/Course-Over-Ground in radians. Real direction of boat movement. Taken from GPS NMEA data. '),
#Altitude = 124,//float, 4 bytes
Field(124, 'f', 'altitude', 'float32', {'units': 'meters'}, FeetToMeters, description='Altitude in meters. Taken from GPS NMEA data.'),
#Heading = 128,//float, 4 bytes
Field(128, 'f', 'heading', 'float32', {'units': 'degrees'}, RadiansToDegrees, description='Heading in radians. Angle in radians between magnetic north and transducer.'),
#Flags = 132, // two bytes
#Field(132, 'H', 'flags', 'uint16', {}, description=''),
# I validated that this appears in logs from my device to be msec since the device
# booted. I.e. Not start of log as it doesnt start at t=0
# TimeOffset = 140,//int, 4 bytes.
Field(140, 'I', 'time_offset', 'uint32', {}, description='Duration since device boot in msec'),
# Mark the end of the fixed length struct without requiring a value decoded from it
Field(144, None, None, None)
# Contains sounding/bounce data
#SoundedData = 144// bytes array, size of PacketSize
]
sl3_block_parser = [
#FrameOffset = 0,//int, 4 bytes
FieldCopy(sl2_block_parser, 0, 'block_offset'),
#ThisFrameSize = 8, //short, 2 bytes. Bytes to next frame Start : https://github.com/risty/SonarLogApi/tree/master/SonarLogAPI/Lowrance
FieldCopy(sl2_block_parser, 8, 'current_block_bytes'),
#PreviousFrameSize = 10,//short, 2 bytes. Bytes to previous frame Start
#last_block_bytes
#ChannelType = 12,//short, 2 bytes
FieldCopy(sl2_block_parser, 12, 'channel'),
#FrameIndex = 16,//int, 4 bytes
FieldCopy(sl2_block_parser, 16, 'frame_index'),
#UpperLimit = 20,//float, 4 bytes
FieldCopy(sl2_block_parser, 20, 'upper_limit'),
#LowerLimit = 24,//float, 4 bytes
FieldCopy(sl2_block_parser, 24, 'lower_limit'),
#CreationDataTime = 40,//int, 4 bytes, value at fist frame = Unix time stamp of file creation. if GPS cant find position value will be "-1" # value in first frame = Unix time stamp of file creation.
#//other frames - time in milliseconds from device boot.
FieldCopy(sl2_block_parser, 40, 'datetime'),
#PacketSize = 44,//short, 2 bytes
FieldCopy(sl2_block_parser, 44, 'data_size'),
#Depth = 48,//float, 4 bytes
FieldCopy(sl2_block_parser, 48, 'water_depth'),
#Frequency = 52,//byte
FieldCopy(sl2_block_parser, 52, 'sonar_frequency'),
#SpeedGps = 84,//float, 4 bytes
FieldCopy(sl2_block_parser, 84, 'speed_gps'),
#Temperature = 88,//float, 4 bytes
FieldCopy(sl2_block_parser, 88, 'temperature'),
#IntLongitude = 92,//int, 4 bytes
FieldCopy(sl2_block_parser, 92, 'longitude'),
#IntLatitude = 96,//int, 4 bytes
FieldCopy(sl2_block_parser, 96, 'latitude'),
#WaterSpeed = 100,//float, 4 bytes
FieldCopy(sl2_block_parser, 100, 'speed_water'),
#CourseOverGround = 104,//float, 4 bytes
FieldCopy(sl2_block_parser, 104, 'course_over_ground'),
#Altitude = 108,//float, 4 bytes
FieldCopy(sl2_block_parser, 108, 'altitude'),
#Heading = 112,//float, 4 bytes
FieldCopy(sl2_block_parser, 112, 'heading'),
#Flags = 116, // two bytes
#FieldCopy(sl2_block_parser, 116, 'flags'),
#TimeOffset = 124,//int, 4 bytes, time in milliseconds from log file creation.
FieldCopy(sl2_block_parser, 124, 'time_offset'),
# ... Need extra fields ...
# Mark the end of the fixed length struct without requiring a value decoded from it
Field(144, None, None, None)
# Data starts at different locations for channel 7,8 and others
# others start at 168, channels 7,8 start at 128
#SoundedData = 168// bytes array
]
# Only used for certain channels as overlaps sonar data otherwise on some of the newer channels
# Newer channels 7 & 8 dont seem to have this block and also have 2x blocks per frame_index
sl3_extra_offsets_parser = [
#LastPrimaryChannelFrameOffset = 128, //int, 4 bytes
#last_primary_channel_block_offset
#LastSecondaryChannelFrameOffset = 132, //int, 4 bytes
#last_secondary_channel_block_offset
#LastDownScanChannelFrameOffset = 136, //int, 4 bytes
#last_downscan_channel_block_offset
#LastSidescanLeftChannelFrameOffset = 140, //int, 4 bytes
#last_left_sidescan_channel_block_offset
#LastSidescanRightChannelFrameOffset = 144, //int, 4 bytes
#last_right_sidescan_channel_block_offset
#LastSidescanCompositeChannelFrameOffset = 148,//int, 4 bytes
#last_composite_sidescan_channel_block_offset
#LastThreeDChannelFrameOffset = 164,//int, 4 bytes
#last_3d_channel_frame_offset
# Mark the end of the fixed length struct without requiring a value decoded from it
Field(168, None, None, None)
]
def ToStruct(format):
struct_format = '<'
current_offset = 0
for field in format:
# MUST be in offset order
assert(field.offset >= current_offset)
# Add any unused padding bytes to move to the next offset
struct_format += 'x' * (field.offset - current_offset)
current_offset = field.offset
# Simply moves to the next field after setting the current_offset
if field.struct_format is None:
assert(field.name is None)
assert(field.dtype is None)
continue
# Add the new field
size = struct.calcsize(field.struct_format)
struct_format += field.struct_format
current_offset += size
logger.debug('Creating struct with format: %s', struct_format)
return struct.Struct(struct_format)
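# Hedged illustration: ToStruct() turns the sparse Field offsets into one
# little-endian struct with pad bytes; for slX_header_parser above it yields
# the format '<HHHxx' (three uint16 fields, then padding to offset 8):
#
#   header_struct = ToStruct(slX_header_parser)
#   fmt, family, block_size = header_struct.unpack(raw[:header_struct.size])  # `raw` is illustrative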
class DecodedObject:
def __repr__(self): return "{}({!r})".format(self.__class__.__name__, self.__dict__)
def UnpackedToObject(format, unpacked):
obj = DecodedObject()
for i in range(0, len(format)):
field = format[i]
data = unpacked[i]
value = field.dtype.type(field.conversion(data))
setattr(obj, field.name, value)
return obj
class ChannelData(object):
def __init__(self, file_name, cache_name, channel, frame_index, format):
self.file_name = file_name
self.cache_name = cache_name
self.channel = channel
self.format = format
self.current_segment = 0
# Used for mapping new channels when seeing a duplicate frame index
# Stores the frame_index we saw last for this channel
self.last_frame_index = frame_index
self.last_frame_index_count = 1
self.data = DecodedObject()
for field in self.format:
            value = numpy.zeros(MAX_SEGMENT_SIZE, dtype=field.dtype)
import numpy as np
from copy import deepcopy
from utils.visualize import draw_bounding_box
from utils.nms import nms
from utils.metrics import rect_dist
from utils.dense_overlap import compute_dense_overlap
import logging
logger = logging.getLogger("detector")
class DataProcessor:
"""
This is a helper class to abstract out all the operation needed during the data-loading
pipeline of the Tiny Faces object detector.
The idea is that this can act as a mixin that enables paddle dataloaders with the heatmap
generation semantics.
"""
def __init__(self, input_size, heatmap_size, pos_thresh, neg_thresh, templates,
img_means=None, rf=None):
self.input_size = input_size
self.heatmap_size = heatmap_size
self.pos_thresh = pos_thresh
self.neg_thresh = neg_thresh
self.templates = templates
self.rf = rf
self.ofy, self.ofx = rf['offset']
self.sty, self.stx = rf['stride']
self.img_means = img_means or [0.485, 0.456, 0.406]
def crop_image(self, img, bboxes):
"""
Crop a 500x500 patch from the image, taking care for smaller images.
bboxes is the np.array of all bounding boxes [x1, y1, x2, y2]
"""
# randomly pick a cropping window for the image
# We keep the second arg to randint at least 1 since randint is [low, high)
crop_x1 = np.random.randint(0, np.max([1, (img.shape[1] - self.input_size[1] + 1)]))
crop_y1 = np.random.randint(0, np.max([1, (img.shape[0] - self.input_size[0] + 1)]))
crop_x2 = min(img.shape[1], crop_x1 + self.input_size[1])
crop_y2 = min(img.shape[0], crop_y1 + self.input_size[0])
crop_h = crop_y2 - crop_y1
crop_w = crop_x2 - crop_x1
# place the cropped image in a random location in a `input_size` image
paste_box = [0, 0, 0, 0] # x1, y1, x2, y2
paste_box[0] = np.random.randint(0, self.input_size[1] - crop_w + 1)
paste_box[1] = np.random.randint(0, self.input_size[0] - crop_h + 1)
paste_box[2] = paste_box[0] + crop_w
paste_box[3] = paste_box[1] + crop_h
# set this to average image colors
# this will later be subtracted in mean image subtraction
img_buf = np.zeros((self.input_size + (3,)))
# add the average image so it gets subtracted later.
for i, c in enumerate(self.img_means):
img_buf[:, :, i] += c
        # img is an int8 array, so we need to scale the values accordingly
img_buf = (img_buf * 255).astype(np.int8)
img_buf[paste_box[1]:paste_box[3], paste_box[0]:paste_box[2], :] = img[crop_y1:crop_y2, crop_x1:crop_x2, :]
if bboxes.shape[0] > 0:
# check if overlap is above negative threshold
tbox = deepcopy(bboxes)
tbox[:, 0] = np.maximum(tbox[:, 0], crop_x1)
tbox[:, 1] = np.maximum(tbox[:, 1], crop_y1)
tbox[:, 2] = np.minimum(tbox[:, 2], crop_x2)
tbox[:, 3] = np.minimum(tbox[:, 3], crop_y2)
overlap = 1 - rect_dist(tbox, bboxes)
# adjust the bounding boxes - first for crop and then for random placement
bboxes[:, 0] = bboxes[:, 0] - crop_x1 + paste_box[0]
bboxes[:, 1] = bboxes[:, 1] - crop_y1 + paste_box[1]
bboxes[:, 2] = bboxes[:, 2] - crop_x1 + paste_box[0]
bboxes[:, 3] = bboxes[:, 3] - crop_y1 + paste_box[1]
# correct for bbox to be within image border
bboxes[:, 0] = np.minimum(self.input_size[1], np.maximum(0, bboxes[:, 0]))
bboxes[:, 1] = np.minimum(self.input_size[0], np.maximum(0, bboxes[:, 1]))
bboxes[:, 2] = np.minimum(self.input_size[1], np.maximum(1, bboxes[:, 2]))
bboxes[:, 3] = np.minimum(self.input_size[0], np.maximum(1, bboxes[:, 3]))
# check to see if the adjusted bounding box is invalid
invalid = np.logical_or(np.logical_or(bboxes[:, 2] <= bboxes[:, 0], bboxes[:, 3] <= bboxes[:, 1]),
overlap < self.neg_thresh)
# remove invalid bounding boxes
ind = np.where(invalid)
bboxes = np.delete(bboxes, ind, 0)
return img_buf, bboxes, paste_box
def get_padding(self, paste_box):
"""
Get the padding of the image based on where the sampled image patch was placed.
:param paste_box: [x1, y1, x2, y2]
:return:
"""
ofy, ofx = self.rf['offset']
sty, stx = self.rf['stride']
vsy, vsx = self.heatmap_size
coarse_x, coarse_y = np.meshgrid(ofx + np.array(range(vsx)) * stx,
ofy + np.array(range(vsy)) * sty)
# each cluster is [x1, y1, x2, y2]
dx1 = self.templates[:, 0]
dy1 = self.templates[:, 1]
dx2 = self.templates[:, 2]
dy2 = self.templates[:, 3]
# compute the bounds
# We add new axes so that the arrays are numpy broadcasting compatible
coarse_xx1 = coarse_x[:, :, np.newaxis] + dx1[np.newaxis, np.newaxis, :] # (vsy, vsx, nt)
coarse_yy1 = coarse_y[:, :, np.newaxis] + dy1[np.newaxis, np.newaxis, :] # (vsy, vsx, nt)
coarse_xx2 = coarse_x[:, :, np.newaxis] + dx2[np.newaxis, np.newaxis, :] # (vsy, vsx, nt)
coarse_yy2 = coarse_y[:, :, np.newaxis] + dy2[np.newaxis, np.newaxis, :] # (vsy, vsx, nt)
# Matlab code indexes from 1 hence to check against it, we need to add +1
# However, in python we don't need the +1 during actual training
padx1 = coarse_xx1 < paste_box[0] + 1
pady1 = coarse_yy1 < paste_box[1] + 1
padx2 = coarse_xx2 > paste_box[2]
pady2 = coarse_yy2 > paste_box[3]
pad_mask = padx1 | pady1 | padx2 | pady2
return pad_mask
def get_regression(self, bboxes, cluster_boxes, iou):
"""
Compute the target bounding box regression values
:param bboxes:
:param cluster_boxes:
:param iou:
:return:
"""
ofy, ofx = self.rf['offset']
sty, stx = self.rf['stride']
vsy, vsx = self.heatmap_size
coarse_xx, coarse_yy = np.meshgrid(ofx + np.array(range(vsx)) * stx,
ofy + np.array(range(vsy)) * sty)
dx1, dy1, dx2, dy2 = cluster_boxes
# We reshape to take advantage of numpy broadcasting
fxx1 = bboxes[:, 0].reshape(1, 1, 1, bboxes.shape[0]) # (1, 1, 1, bboxes)
fyy1 = bboxes[:, 1].reshape(1, 1, 1, bboxes.shape[0])
fxx2 = bboxes[:, 2].reshape(1, 1, 1, bboxes.shape[0])
fyy2 = bboxes[:, 3].reshape(1, 1, 1, bboxes.shape[0])
h = dy2 - dy1 + 1
w = dx2 - dx1 + 1
dhh = h.reshape(1, 1, h.shape[0], 1) # (1, 1, N, 1)
dww = w.reshape(1, 1, w.shape[0], 1) # (1, 1, N, 1)
fcx = (fxx1 + fxx2) / 2
fcy = (fyy1 + fyy2) / 2
tx = np.divide((fcx - coarse_xx.reshape(vsy, vsx, 1, 1)), dww)
ty = np.divide((fcy - coarse_yy.reshape(vsy, vsx, 1, 1)), dhh)
fhh = fyy2 - fyy1 + 1
fww = fxx2 - fxx1 + 1
tw = np.log(np.divide(fww, dww)) # (1, 1, N, bboxes)
th = np.log(np.divide(fhh, dhh))
# Randomly perturb the IOU so that if multiple candidates have the same IOU,
# we don't pick the same one every time. This is useful when the template is smaller than the GT bbox
iou = iou + (1e-6 * np.random.rand(*iou.shape))
best_obj_per_loc = iou.argmax(axis=3)
idx0, idx1, idx2 = np.indices(iou.shape[:-1])
tx = tx[idx0, idx1, idx2, best_obj_per_loc]
ty = ty[idx0, idx1, idx2, best_obj_per_loc]
tw = np.repeat(tw, vsy, axis=0) # (vsy, 1, N, bboxes)
tw = np.repeat(tw, vsx, axis=1) # (vsy, vsx, N, bboxes)
tw = tw[idx0, idx1, idx2, best_obj_per_loc]
th = np.repeat(th, vsy, axis=0)
th = np.repeat(th, vsx, axis=1)
th = th[idx0, idx1, idx2, best_obj_per_loc]
return np.concatenate((tx, ty, tw, th), axis=2), iou
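    # Hedged note: (tx, ty, tw, th) above is the standard anchor-box encoding -
    # center offsets normalized by the template width/height plus log size
    # ratios, i.e. tx = (cx_gt - cx_anchor) / w_anchor and tw = log(w_gt / w_anchor),
    # matching e.g. the Faster R-CNN box parameterization.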
def get_heatmaps(self, bboxes, pad_mask):
ofy, ofx = self.rf['offset']
sty, stx = self.rf['stride']
vsy, vsx = self.heatmap_size
nt = self.templates.shape[0]
# Initiate heatmaps
class_maps = -np.ones((vsy, vsx, nt))
regress_maps = np.zeros((vsy, vsx, nt * 4))
# each cluster is [-w/2, -h/2, w/2, h/2]
dx1, dx2 = self.templates[:, 0], self.templates[:, 2]
dy1, dy2 = self.templates[:, 1], self.templates[:, 3]
# Filter out invalid bbox
invalid = np.logical_or(bboxes[:, 2] <= bboxes[:, 0], bboxes[:, 3] <= bboxes[:, 1])
ind = np.where(invalid)
bboxes = np.delete(bboxes, ind, axis=0)
ng = bboxes.shape[0]
iou = np.zeros((vsy, vsx, self.templates.shape[0], bboxes.shape[0]))
if ng > 0:
gx1, gy1, gx2, gy2 = bboxes[:, 0], bboxes[:, 1], bboxes[:, 2], bboxes[:, 3]
iou = compute_dense_overlap(ofx, ofy, stx, sty, vsx, vsy,
dx1, dy1, dx2, dy2,
gx1, gy1, gx2, gy2,
1, 1)
regress_maps, iou = self.get_regression(bboxes, [dx1, dy1, dx2, dy2], iou)
best_iou = iou.max(axis=3)
# Set max IoU values to 1 (even if they are < pos_thresh, as long as they are above neg_thresh)
per_object_iou = np.reshape(iou, (-1, ng))
fbest_idx = np.argmax(per_object_iou, axis=0)
            iou_ = np.amax(per_object_iou, axis=0)
import os
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import util.io as mio
from util import statsUtil
from model.message import Message
SAVE_PLOT = False
def plotBasicLengthStatsByYearAndMonth(data, yearsToShow=None, targetStats=None,
targetSenders=None):
df = statsUtil.filter_stats(data, {'sender':targetSenders, 'year':yearsToShow,
'stat':targetStats})
g = sns.factorplot(x="month", y="val", row="stat", hue='sender', col='year', data=df,
kind="bar", size=3, aspect=2.5, legend_out=False)
g.fig.suptitle('Basic Length Stats')
sns.plt.show()
def plotBasicLengthStatsByHour(data, targetStats=None, targetSenders=None, kind='bar'):
df = statsUtil.filter_stats(data, {'sender':targetSenders, 'stat':targetStats})
g = sns.factorplot(x="hour", y="val", row="stat", hue='sender', data=df,
kind=kind, size=3, aspect=2.5, legend_out=False)
g.fig.suptitle('Basic Length Stats - Hour')
#sns.plt.show()
def plotRichnessVariation(data, targetLabel, yearsToShow=None, targetSenders=None):
df = data.reset_index()
df = statsUtil.filter_stats(df, {'year':yearsToShow, 'sender':targetSenders})
g = sns.factorplot(x=targetLabel, y="lexicalRichness", col="year", hue='sender',
data=df, kind="point", legend_out=False)
g.set(ylabel='lexical richness (%)')
g.fig.suptitle('Vocabulary Richness')
sns.plt.show()
def _genericFactorPlot(data, xTarget, yTarget, filters, title, yLabel, col=None, row=None,
kind='point'):
df = statsUtil.filter_stats(data, filters)
g = sns.factorplot(x=xTarget, y=yTarget, col=col, row=row, hue='sender',
data=df, kind=kind, legend_out=False)
g.set(ylabel=yLabel)
g.fig.suptitle(title)
sns.plt.show()
# does single year only. Use with animations or to see boxplots
def plotSingleBasicLengthStatByYearAndHour(data, stat, yearsToShow=None,
targetSenders=None, ax=None):
df = statsUtil.filter_stats(data, {'sender':targetSenders, 'year':yearsToShow,
'stat':[stat]})
ax = sns.barplot(x="hour", y='val', hue="sender", data=df, ax=ax)
ax.set(ylabel=stat)
#sns.plt.show()
def plotSingleBasicLengthStatHeatmap(data, stat, targetSender, yearsToShow=None):
df = data.xs(targetSender, level='sender')
df = df.reset_index()
def plot(ax, df, count):
df = df.pivot('month', 'day', stat)
# TODO share y. Fix it or try factorgrid
ax = sns.heatmap(df, mask=df.isnull(), ax=ax, vmin=0, vmax=30000)#cmap=ListedColormap(['red', 'blue'])
ax.set(ylabel='month' if count == 1 else '')
_plotByYear(df, "{} ({})".format(stat, targetSender), plot, yearsToShow)
def plotSentimentStatsByHour(sentimentStats, valueNames):
data = statsUtil.transformSentimentStats(sentimentStats, valueNames, ['sender', 'hour'])
ax = sns.factorplot(x="hour", y="val", col="emotion", hue='sender',
data=data, kind="point", sharey=False, legend_out=False)
ax.set(ylabel='mean(val)')
sns.plt.show()
def plotSentimentStatsByYearAndMonth(sentimentStats, valueNames):
data = statsUtil.transformSentimentStats(sentimentStats, valueNames, ['sender', 'year', 'month'])
sns.factorplot(x="month", y="val", row="emotion", hue='sender', col='year',
data=data, kind="point", sharey=False, legend_out=False)
sns.plt.show()
# TODO fill all possible values for the index (dates, month, year)
# Add sender or total labels
def plotWordsCount(wordsCountStats, words, sender=None, yearsToShow=None):
data = wordsCountStats.getWordsCount(words, sender)
if data is None:
return
def plot(ax, df, count):
df.reset_index(level='year').plot(ax=ax)
if 'year' in list(data.index.names):
_plotByYear(data, 'Word count', plot, yearsToShow)
else:
data.plot()
sns.plt.show()
def plotZipfLaw(words, count):
figureAesthetic()
#plt.figure(1).suptitle("Zip's Law", fontsize=20)
ax = plt.subplot(1,2,1)
plt.xlabel("word")
plt.ylabel("frequency")
numWords = 20
    x = np.arange(numWords)
import numpy as np
import scipy.stats
import copy
from astropy.tests.helper import pytest
from astropy.modeling import models
from scipy.special import gammaln as scipy_gammaln
from stingray import Lightcurve, Powerspectrum
from stingray.modeling import Posterior, PSDPosterior, \
PoissonPosterior, GaussianPosterior, LaplacePosterior
from stingray.modeling import set_logprior
from stingray.modeling.posterior import logmin
from stingray.modeling.posterior import IncorrectParameterError
from stingray.modeling.posterior import LogLikelihood
np.random.seed(20150907)
class TestMeta(object):
def test_use_loglikelihood_class_directly(self):
with pytest.raises(TypeError):
a = LogLikelihood(1, 2, models.Lorentz1D)
def test_inherit_loglikelihood_improperly(self):
class a(LogLikelihood):
def __init__(self, *args, **kwargs):
LogLikelihood.__init__(self, *args, **kwargs)
with pytest.raises(TypeError):
a(1, 2, models.Lorentz1D)
def test_inherit_loglikelihood_properly(self):
class a(LogLikelihood):
def __init__(self, *args, **kwargs):
LogLikelihood.__init__(self, *args, **kwargs)
def evaluate(self, parameters):
pass
a(1, 2, models.Lorentz1D)
class TestSetPrior(object):
@classmethod
def setup_class(cls):
photon_arrivals = np.sort(np.random.uniform(0,1000, size=10000))
cls.lc = Lightcurve.make_lightcurve(photon_arrivals, dt=1.0)
cls.ps = Powerspectrum(cls.lc, norm="frac")
pl = models.PowerLaw1D()
pl.x_0.fixed = True
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power, pl, m=cls.ps.m)
def test_set_prior_runs(self):
p_alpha = lambda alpha: ((-1. <= alpha) & (alpha <= 5.))/6.0
p_amplitude = lambda amplitude: ((-10 <= np.log(amplitude)) &
((np.log(amplitude) <= 10.0)))/20.0
priors = {"alpha":p_alpha, "amplitude":p_amplitude}
self.lpost.logprior = set_logprior(self.lpost, priors)
def test_prior_executes_correctly(self):
p_alpha = lambda alpha: ((-1. <= alpha) & (alpha <= 5.))/6.0
p_amplitude = lambda amplitude: ((-10 <= np.log(amplitude)) &
((np.log(amplitude) <= 10.0)))/20.0
priors = {"alpha":p_alpha, "amplitude":p_amplitude}
self.lpost.logprior = set_logprior(self.lpost, priors)
true_logprior = np.log(1./6.) + np.log(1./20.0)
assert self.lpost.logprior([np.exp(0.0), np.exp(0.0)]) == true_logprior
def test_prior_returns_logmin_outside_prior_range(self):
p_alpha = lambda alpha: ((-1. <= alpha) & (alpha <= 5.))/6.0
p_amplitude = lambda amplitude: ((-10 <= np.log(amplitude)) &
((np.log(amplitude) <= 10.0)))/20.0
priors = {"alpha":p_alpha, "amplitude":p_amplitude}
self.lpost.logprior = set_logprior(self.lpost, priors)
assert self.lpost.logprior([-2.0, np.exp(11.0)]) == logmin
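# Illustrative sketch (not one of the original tests): `set_logprior` combines
# independent per-parameter priors into a single log-prior callable. `lpost` is
# assumed to wrap a model with free parameters `alpha` and `amplitude`.
def _example_set_logprior(lpost):
    p_alpha = lambda alpha: ((-1. <= alpha) & (alpha <= 5.)) / 6.0
    p_amplitude = lambda amplitude: ((-10 <= np.log(amplitude)) &
                                     (np.log(amplitude) <= 10.0)) / 20.0
    lpost.logprior = set_logprior(lpost, {"alpha": p_alpha,
                                          "amplitude": p_amplitude})
    # Inside both ranges the log-prior is log(1/6) + log(1/20).
    return lpost.logprior([np.exp(0.0), np.exp(0.0)])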
class PosteriorClassDummy(Posterior):
"""
This is a test class that tests the basic functionality of the
Posterior superclass.
"""
def __init__(self, x, y, model):
Posterior.__init__(self, x, y, model)
def loglikelihood(self, t0, neg=False):
loglike = 1.0
return loglike
def logprior(self, t0):
lp = 2.0
return lp
class TestPosterior(object):
@classmethod
def setup_class(cls):
cls.x = np.arange(100)
cls.y = np.ones(cls.x.shape[0])
cls.model = models.Const1D()
cls.p = PosteriorClassDummy(cls.x, cls.y, cls.model)
p_alpha = lambda alpha: ((-1. <= alpha) & (alpha <= 5.))/6.0
priors = {"amplitude":p_alpha}
cls.p.logprior = set_logprior(cls.p, priors)
def test_inputs(self):
assert np.allclose(self.p.x, self.x)
assert np.allclose(self.p.y, self.y)
assert isinstance(self.p.model, models.Const1D)
def test_call_method_positive(self):
t0 = [1]
post = self.p(t0, neg=False)
assert post == 1.0 + np.log(1./6.0)
def test_call_method_negative(self):
t0 = [1]
post = self.p(t0, neg=True)
assert post == -(1.0 + np.log(1./6.0))
class TestPSDPosterior(object):
@classmethod
def setup_class(cls):
m = 1
nfreq = 1000000
freq = np.arange(nfreq)
noise = np.random.exponential(size=nfreq)
power = noise*2.0
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1]-freq[0]
ps.norm = "leahy"
cls.ps = ps
cls.a_mean, cls.a_var = 2.0, 1.0
cls.model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)
cls.priors = {"amplitude":p_amplitude}
def test_logprior_fails_without_prior(self):
lpost = PSDPosterior(self.ps.freq, self.ps.power, self.model,
m=self.ps.m)
with pytest.raises(AttributeError):
lpost.logprior([1])
def test_making_posterior(self):
lpost = PSDPosterior(self.ps.freq, self.ps.power,
self.model, m=self.ps.m)
lpost.logprior = set_logprior(lpost, self.priors)
assert lpost.x.all() == self.ps.freq.all()
assert lpost.y.all() == self.ps.power.all()
def test_correct_number_of_parameters(self):
lpost = PSDPosterior(self.ps.freq, self.ps.power,
self.model, m=self.ps.m)
lpost.logprior = set_logprior(lpost, self.priors)
with pytest.raises(IncorrectParameterError):
lpost([2,3])
def test_logprior(self):
t0 = [2.0]
lpost = PSDPosterior(self.ps.freq, self.ps.power,
self.model, m=self.ps.m)
lpost.logprior = set_logprior(lpost, self.priors)
lp_test = lpost.logprior(t0)
lp = np.log(scipy.stats.norm(2.0, 1.0).pdf(t0))
assert lp == lp_test
def test_loglikelihood(self):
t0 = [2.0]
self.model.amplitude = t0[0]
mean_model = self.model(self.ps.freq)
loglike = -np.sum(np.log(mean_model)) - np.sum(self.ps.power/mean_model)
lpost = PSDPosterior(self.ps.freq, self.ps.power,
self.model, m=self.ps.m)
lpost.logprior = set_logprior(lpost, self.priors)
loglike_test = lpost.loglikelihood(t0, neg=False)
assert np.isclose(loglike, loglike_test)
def test_negative_loglikelihood(self):
t0 = [2.0]
self.model.amplitude = t0[0]
m = self.model(self.ps.freq[1:])
loglike = np.sum(self.ps.power[1:]/m + np.log(m))
lpost = PSDPosterior(self.ps.freq, self.ps.power,
self.model, m=self.ps.m)
lpost.logprior = set_logprior(lpost, self.priors)
loglike_test = lpost.loglikelihood(t0, neg=True)
assert np.isclose(loglike, loglike_test)
def test_posterior(self):
t0 = [2.0]
self.model.amplitude = t0[0]
m = self.model(self.ps.freq[1:])
lpost = PSDPosterior(self.ps.freq, self.ps.power,
self.model, m=self.ps.m)
lpost.logprior = set_logprior(lpost, self.priors)
post_test = lpost(t0, neg=False)
loglike = -np.sum(self.ps.power[1:]/m + np.log(m))
logprior = np.log(scipy.stats.norm(2.0, 1.0).pdf(t0))
post = loglike + logprior
assert np.isclose(post_test, post, atol=1.e-10)
def test_negative_posterior(self):
t0 = [2.0]
self.model.amplitude = t0[0]
m = self.model(self.ps.freq[1:])
lpost = PSDPosterior(self.ps.freq, self.ps.power,
self.model, m=self.ps.m)
lpost.logprior = set_logprior(lpost, self.priors)
post_test = lpost(t0, neg=True)
loglike = -np.sum(self.ps.power[1:]/m + np.log(m))
logprior = np.log(scipy.stats.norm(2.0, 1.0).pdf(t0))
post = -loglike - logprior
assert np.isclose(post_test, post, atol=1.e-10)
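# Reference sketch (assumption: for m=1 periodograms, PSDPosterior.loglikelihood
# reduces to the Whittle likelihood checked in the tests above).
def _whittle_loglike(freq, power, mean_model):
    m = mean_model(freq)
    return -np.sum(np.log(m)) - np.sum(power / m)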
class TestPoissonPosterior(object):
@classmethod
def setup_class(cls):
nx = 1000000
cls.x = np.arange(nx)
cls.countrate = 10.0
cls.y = np.random.poisson(cls.countrate, size=cls.x.shape[0])
cls.model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=cls.countrate, scale=cls.countrate).pdf(amplitude)
cls.priors = {"amplitude":p_amplitude}
def test_logprior_fails_without_prior(self):
lpost = PoissonPosterior(self.x, self.y, self.model)
with pytest.raises(AttributeError):
lpost.logprior([10])
def test_making_posterior(self):
lpost = PoissonPosterior(self.x, self.y, self.model)
lpost.logprior = set_logprior(lpost, self.priors)
assert lpost.x.all() == self.x.all()
assert lpost.y.all() == self.y.all()
def test_correct_number_of_parameters(self):
lpost = PoissonPosterior(self.x, self.y, self.model)
lpost.logprior = set_logprior(lpost, self.priors)
with pytest.raises(IncorrectParameterError):
lpost([2,3])
def test_logprior(self):
t0 = [10.0]
lpost = PoissonPosterior(self.x, self.y, self.model)
lpost.logprior = set_logprior(lpost, self.priors)
lp_test = lpost.logprior(t0)
lp = np.log(scipy.stats.norm(self.countrate, self.countrate).pdf(t0))
assert lp == lp_test
def test_loglikelihood(self):
t0 = [10.0]
self.model.amplitude = t0[0]
mean_model = self.model(self.x)
loglike = np.sum(-mean_model + self.y*np.log(mean_model) - scipy_gammaln(self.y+1))
lpost = PoissonPosterior(self.x, self.y, self.model)
lpost.logprior = set_logprior(lpost, self.priors)
loglike_test = lpost.loglikelihood(t0, neg=False)
assert np.isclose(loglike, loglike_test)
def test_negative_loglikelihood(self):
t0 = [10.0]
self.model.amplitude = t0[0]
mean_model = self.model(self.x)
loglike = -np.sum(-mean_model + self.y*np.log(mean_model) - scipy_gammaln(self.y+1))
lpost = PoissonPosterior(self.x, self.y, self.model)
lpost.logprior = set_logprior(lpost, self.priors)
loglike_test = lpost.loglikelihood(t0, neg=True)
assert np.isclose(loglike, loglike_test)
def test_posterior(self):
t0 = [10.0]
self.model.amplitude = t0[0]
mean_model = self.model(self.x)
lpost = PoissonPosterior(self.x, self.y, self.model)
lpost.logprior = set_logprior(lpost, self.priors)
post_test = lpost(t0, neg=False)
loglike = np.sum(-mean_model + self.y*np.log(mean_model) - scipy_gammaln(self.y+1))
logprior = np.log(scipy.stats.norm(self.countrate, self.countrate).pdf(t0))
post = loglike + logprior
assert np.isclose(post_test, post, atol=1.e-10)
def test_negative_posterior(self):
t0 = [10.0]
self.model.amplitude = t0[0]
mean_model = self.model(self.x)
lpost = PoissonPosterior(self.x, self.y, self.model)
lpost.logprior = set_logprior(lpost, self.priors)
post_test = lpost(t0, neg=True)
loglike = np.sum(-mean_model + self.y*np.log(mean_model) - scipy_gammaln(self.y+1))
logprior = np.log(scipy.stats.norm(self.countrate, self.countrate).pdf(t0))
post = -loglike - logprior
assert np.isclose(post_test, post, atol=1.e-10)
def test_counts_are_nan(self):
y = np.nan * np.ones(self.x.shape[0])
t0 = [10.0]
self.model.amplitude = t0[0]
mean_model = self.model(self.x)
lpost = PoissonPosterior(self.x, y, self.model)
lpost.logprior = set_logprior(lpost, self.priors)
assert np.isclose(lpost(t0), logmin, 1e-5)
class TestGaussianPosterior(object):
@classmethod
def setup_class(cls):
nx = 1000000
cls.x = np.arange(nx)
cls.countrate = 10.0
cls.cerr = 2.0
cls.y = np.random.normal(cls.countrate, cls.cerr, size=cls.x.shape[0])
cls.yerr = np.ones_like(cls.y)*cls.cerr
cls.model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=cls.countrate, scale=cls.cerr).pdf(amplitude)
cls.priors = {"amplitude":p_amplitude}
def test_logprior_fails_without_prior(self):
lpost = GaussianPosterior(self.x, self.y, self.yerr, self.model)
with pytest.raises(AttributeError):
lpost.logprior([10])
def test_making_posterior(self):
lpost = GaussianPosterior(self.x, self.y, self.yerr, self.model)
lpost.logprior = set_logprior(lpost, self.priors)
assert lpost.x.all() == self.x.all()
assert lpost.y.all() == self.y.all()
def test_correct_number_of_parameters(self):
lpost = GaussianPosterior(self.x, self.y, self.yerr, self.model)
lpost.logprior = set_logprior(lpost, self.priors)
with pytest.raises(IncorrectParameterError):
lpost([2,3])
def test_logprior(self):
t0 = [10.0]
lpost = GaussianPosterior(self.x, self.y, self.yerr, self.model)
lpost.logprior = set_logprior(lpost, self.priors)
lp_test = lpost.logprior(t0)
lp = np.log(scipy.stats.norm(self.countrate, self.cerr).pdf(t0))
assert lp == lp_test
def test_loglikelihood(self):
t0 = [10.0]
self.model.amplitude = t0[0]
mean_model = self.model(self.x)
loglike = np.sum(-0.5*np.log(2.*np.pi) - np.log(self.yerr) - \
0.5*((self.y - mean_model)/self.yerr)**2.0)
lpost = GaussianPosterior(self.x, self.y, self.yerr, self.model)
lpost.logprior = set_logprior(lpost, self.priors)
loglike_test = lpost.loglikelihood(t0, neg=False)
assert np.isclose(loglike, loglike_test)
def test_negative_loglikelihood(self):
t0 = [10.0]
self.model.amplitude = t0[0]
mean_model = self.model(self.x)
loglike = -np.sum(-0.5*np.log(2.*np.pi) - np.log(self.yerr) - \
0.5*((self.y - mean_model)/self.yerr)**2.0)
lpost = GaussianPosterior(self.x, self.y, self.yerr, self.model)
lpost.logprior = set_logprior(lpost, self.priors)
loglike_test = lpost.loglikelihood(t0, neg=True)
assert np.isclose(loglike, loglike_test)
def test_posterior(self):
t0 = [10.0]
self.model.amplitude = t0[0]
mean_model = self.model(self.x)
lpost = GaussianPosterior(self.x, self.y, self.yerr, self.model)
lpost.logprior = set_logprior(lpost, self.priors)
post_test = lpost(t0, neg=False)
loglike = np.sum(-0.5*np.log(2.*np.pi) - np.log(self.yerr) - \
0.5*((self.y - mean_model)/self.yerr)**2.0)
logprior = np.log(scipy.stats.norm(self.countrate, self.cerr).pdf(t0))
post = loglike + logprior
assert np.isclose(post_test, post, atol=1.e-10)
def test_negative_posterior(self):
t0 = [10.0]
self.model.amplitude = t0[0]
mean_model = self.model(self.x)
lpost = GaussianPosterior(self.x, self.y, self.yerr, self.model)
lpost.logprior = set_logprior(lpost, self.priors)
post_test = lpost(t0, neg=True)
loglike = np.sum(-0.5*np.log(2.*np.pi) - np.log(self.yerr) - \
0.5*((self.y - mean_model)/self.yerr)**2.0)
logprior = np.log(scipy.stats.norm(self.countrate, self.cerr).pdf(t0))
post = -loglike - logprior
assert np.isclose(post_test, post, atol=1.e-10)
def test_counts_are_nan(self):
y = np.nan * np.ones(self.x.shape[0])
t0 = [10.0]
self.model.amplitude = t0[0]
lpost = GaussianPosterior(self.x, y, self.yerr, self.model)
lpost.logprior = set_logprior(lpost, self.priors)
assert np.isclose(lpost(t0), logmin, 1e-5)
class TestLaplacePosterior(object):
@classmethod
def setup_class(cls):
nx = 1000000
cls.x = np.arange(nx)
cls.countrate = 10.0
cls.cerr = 2.0
cls.y = np.random.normal(cls.countrate, cls.cerr, size=cls.x.shape[0])
cls.yerr = np.ones_like(cls.y)*cls.cerr
cls.model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=cls.countrate, scale=cls.cerr).pdf(amplitude)
cls.priors = {"amplitude":p_amplitude}
def test_logprior_fails_without_prior(self):
lpost = LaplacePosterior(self.x, self.y, self.yerr, self.model)
with pytest.raises(AttributeError):
lpost.logprior([10])
def test_making_posterior(self):
lpost = LaplacePosterior(self.x, self.y, self.yerr, self.model)
lpost.logprior = set_logprior(lpost, self.priors)
assert lpost.x.all() == self.x.all()
assert lpost.y.all() == self.y.all()
def test_correct_number_of_parameters(self):
lpost = LaplacePosterior(self.x, self.y, self.yerr, self.model)
lpost.logprior = set_logprior(lpost, self.priors)
with pytest.raises(IncorrectParameterError):
lpost([2,3])
def test_logprior(self):
t0 = [10.0]
lpost = LaplacePosterior(self.x, self.y, self.yerr, self.model)
lpost.logprior = set_logprior(lpost, self.priors)
lp_test = lpost.logprior(t0)
lp = np.log(scipy.stats.norm(self.countrate, self.cerr).pdf(t0))
assert lp == lp_test
def test_loglikelihood(self):
t0 = [10.0]
self.model.amplitude = t0[0]
mean_model = self.model(self.x)
loglike = np.sum(-np.log(2.0*self.yerr) -
np.abs(self.y - mean_model)/self.yerr)
lpost = LaplacePosterior(self.x, self.y, self.yerr, self.model)
lpost.logprior = set_logprior(lpost, self.priors)
loglike_test = lpost.loglikelihood(t0, neg=False)
assert np.isclose(loglike, loglike_test)
def test_negative_loglikelihood(self):
t0 = [10.0]
self.model.amplitude = t0[0]
mean_model = self.model(self.x)
loglike = -np.sum(-np.log(2.0*self.yerr) -
np.abs(self.y - mean_model)/self.yerr)
lpost = LaplacePosterior(self.x, self.y, self.yerr, self.model)
lpost.logprior = set_logprior(lpost, self.priors)
loglike_test = lpost.loglikelihood(t0, neg=True)
assert np.isclose(loglike, loglike_test)
def test_posterior(self):
t0 = [10.0]
self.model.amplitude = t0[0]
mean_model = self.model(self.x)
lpost = LaplacePosterior(self.x, self.y, self.yerr, self.model)
lpost.logprior = set_logprior(lpost, self.priors)
post_test = lpost(t0, neg=False)
loglike = np.sum(-np.log(2.0*self.yerr) -
                         np.abs(self.y - mean_model)/self.yerr)
        logprior = np.log(scipy.stats.norm(self.countrate, self.cerr).pdf(t0))
        post = loglike + logprior
        assert np.isclose(post_test, post, atol=1.e-10)
from __future__ import division, absolute_import, print_function
import sys
from itertools import product
import numpy as np
from numpy.core import zeros, float64
from numpy.testing import dec, TestCase, assert_almost_equal, assert_, \
assert_raises, assert_array_equal, assert_allclose, assert_equal
from numpy.core.multiarray import inner as inner_
DECPREC = 14
class TestInner(TestCase):
def test_vecself(self):
"""Ticket 844."""
        # Inner product of a vector with itself used to segfault or give a
        # meaningless result
a = zeros(shape = (1, 80), dtype = float64)
p = inner_(a, a)
assert_almost_equal(p, 0, decimal = DECPREC)
try:
import numpy.core._dotblas as _dotblas
except ImportError:
_dotblas = None
@dec.skipif(_dotblas is None, "Numpy is not compiled with _dotblas")
def test_blasdot_used():
from numpy.core import dot, vdot, inner, alterdot, restoredot
assert_(dot is _dotblas.dot)
assert_(vdot is _dotblas.vdot)
assert_(inner is _dotblas.inner)
assert_(alterdot is _dotblas.alterdot)
assert_(restoredot is _dotblas.restoredot)
def test_dot_2args():
from numpy.core import dot
a = | np.array([[1, 2], [3, 4]], dtype=float) | numpy.array |
from utilities.python_path_utility import append_to_pythonpath
append_to_pythonpath(['feature_extractors/reid_strong_baseline'
,'feature_extractors/ABD_Net'
,'detectors/mmdetection'
,'evaluation/py_motmetrics'
,'trackers/fair/src/lib'], __file__)
import argparse
import mmcv
import numpy as np
from trackers.deep_sort import DeepSort
from util import draw_bboxes
from tqdm import tqdm
from utilities.helper import xtylwh_to_xyxy
import warnings
import cv2
import logging
import json
from utilities.track_result_statistics import count_tracks
from utilities.python_path_utility import *
import pandas as pd
from feature_extractors.reid_strong_baseline.utils.logger import setup_logger
from datasets.mta_dataset_cam_iterator import get_cam_iterators
from tracking_utils.timer import Timer
class Run_tracker:
def __init__(self,args):
self.cfg = mmcv.Config.fromfile(args.config).root
self.cfg.general.config_basename = os.path.basename(args.config).replace(".py","")
self.use_original_wda = self.cfg.general.config_basename[:4] != "fair"
self.cfg.general.repository_root = os.path.abspath(os.path.dirname(__file__))
self.set_tracker_config_run_path()
self.set_track_features_folder()
        # mmdetection does not move everything to the device set in its function calls.
        # E.g. with torch.cuda.set_device(4) it runs without errors but still uses GPU 0.
        # With os.environ['CUDA_VISIBLE_DEVICES'] = '4' visibility is restricted to only
        # the named GPUs, which are then numbered internally from zero.
os.environ['CUDA_VISIBLE_DEVICES'] = self.cfg.general.cuda_visible_devices
#Initializes the detector class by calling the constructor and creating the object
if self.use_original_wda:
from detectors.mmdetection_detector import Mmdetection_detector
self.detector = Mmdetection_detector(self.cfg)
self.deep_sort = DeepSort(self.cfg, use_original_wda=self.use_original_wda)
#Set up the logger
logger = setup_logger("mtmct", self.cfg.general.config_run_path, 0)
logger.info(args)
logger.info(json.dumps(self.cfg,sort_keys=True, indent=4))
def get_detections_path(self,cam_id):
detections_path_folder = os.path.join(self.cfg.general.config_run_path
, "detections")
os.makedirs(detections_path_folder, exist_ok=True)
detections_path = os.path.join(detections_path_folder, "detections_cam_{}.csv".format(cam_id))
return detections_path
def get_detections_frame_nos_path(self, cam_id):
detections_path_folder = os.path.join(self.cfg.general.config_run_path
, "detections")
os.makedirs(detections_path_folder, exist_ok=True)
detections_path = os.path.join(detections_path_folder, "detections_frame_nos_cam_{}.csv".format(cam_id))
return detections_path
def load_detections(self,cam_iterator):
self.detections_to_store = []
self.detections_frame_nos = []
detections_frame_nos_loaded = pd.DataFrame({ "frame_no_cam" : [] })
self.detections_loaded = pd.DataFrame()
self.detections_path = self.get_detections_path(cam_id=cam_iterator.cam_id)
self.detections_frame_nos_path = self.get_detections_frame_nos_path(cam_id=cam_iterator.cam_id)
if os.path.exists(self.detections_frame_nos_path):
detections_frame_nos_loaded = pd.read_csv(self.detections_frame_nos_path)
        # This ensures that new detections are regenerated whenever the selection of frame numbers changes
cam_iterator_frame_nos = cam_iterator.get_all_frame_nos()
frame_nos_union_length = len(set(detections_frame_nos_loaded["frame_no_cam"]).intersection(set(cam_iterator_frame_nos)))
cam_iterator_frame_nos_length = len(cam_iterator_frame_nos)
if frame_nos_union_length == cam_iterator_frame_nos_length and os.path.exists(self.detections_path):
self.detections_loaded = pd.read_csv(self.detections_path)
def set_track_features_folder(self):
# Build the path where transformed track features will be stored for preparation of clustering.
self.cfg.general.track_features_folder = os.path.join(self.cfg.general.repository_root
, "work_dirs"
, "clustering"
, "config_runs"
, self.cfg.general.config_basename
, "pickled_appearance_features"
, "test")
os.makedirs(self.cfg.general.track_features_folder, exist_ok=True)
def set_tracker_config_run_path(self):
# Build the path where results and logging files etc. should be stored
self.cfg.general.config_run_path = os.path.join(self.cfg.general.repository_root
,"work_dirs"
, "tracker"
, "config_runs"
, self.cfg.general.config_basename)
os.makedirs(self.cfg.general.config_run_path, exist_ok=True)
def save_detections(self):
if len(self.detections_to_store) > 0:
detections_to_store_df = pd.DataFrame(self.detections_to_store)
detections_to_store_df = detections_to_store_df.astype({"frame_no_cam": int
, "id": int
, "x": int
, "y": int
, "w": int
, "h": int
, "score": float})
detections_to_store_df.to_csv(self.detections_path, index=False)
detections_frame_nos = pd.DataFrame(self.detections_frame_nos)
detections_frame_nos.to_csv(self.detections_frame_nos_path, index=False)
def store_detections_one_frame(self, frame_no_cam, xywh_bboxes, scores):
        '''
        Stores one frame's detections as rows of "frame_no_cam,id,x,y,w,h,score".
        :param frame_no_cam: frame number within the camera stream
        :param xywh_bboxes: bounding boxes in (x, y, w, h) format
        :param scores: one detection confidence score per bounding box
        '''
for index, xywh_bbox_score in enumerate(zip(xywh_bboxes,scores)):
xywh_bbox, score = xywh_bbox_score
self.detections_to_store.append({ "frame_no_cam" : frame_no_cam
, "id" : index
, "x" : xywh_bbox[0]
, "y" : xywh_bbox[1]
, "w" : xywh_bbox[2]
, "h" : xywh_bbox[3]
, "score" : score })
def img_callback_original_wda(self, dataset_img):
if len(self.detections_loaded) > 0:
detections_current_frame = self.detections_loaded[self.detections_loaded["frame_no_cam"] == dataset_img.frame_no_cam]
scores = detections_current_frame["score"].tolist()
bboxes_xtlytlwh = list(zip(detections_current_frame["x"], detections_current_frame["y"],detections_current_frame["w"],detections_current_frame["h"]))
else:
bboxes_xtlytlwh, scores = self.detector.detect(dataset_img.img)
self.store_detections_one_frame(dataset_img.frame_no_cam, bboxes_xtlytlwh, scores)
self.detections_frame_nos.append({ "frame_no_cam" : dataset_img.frame_no_cam })
draw_img = dataset_img.img
if bboxes_xtlytlwh is not None:
outputs = self.deep_sort.update(bboxes_xtlytlwh, scores, dataset_img)
if len(outputs) > 0:
bboxes_xtylwh = outputs[:, :4]
bboxes_xyxy = [ xtylwh_to_xyxy(bbox_xtylwh,dataset_img.img_dims) for bbox_xtylwh in bboxes_xtylwh ]
identities = outputs[:, -2]
detection_idxs = outputs[:, -1]
draw_img = draw_bboxes(dataset_img.img, bboxes_xyxy, identities)
for detection_idx, person_id, bbox in zip(detection_idxs,identities,bboxes_xyxy):
print('%d,%d,%d,%d,%d,%d,%d,%d' % (
dataset_img.frame_no_cam, dataset_img.cam_id, person_id, detection_idx,int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])), file=self.track_results_file)
if self.cfg.general.display_viewer:
cv2.imshow("Annotation Viewer", draw_img)
cv2.waitKey(1)
def img_callback(self, dataset_img):
draw_img = dataset_img.img
outputs = self.deep_sort.update_with_fair_detections(dataset_img)
if len(outputs) > 0:
bboxes_xtylwh = outputs[:, :4]
bboxes_xyxy = [ xtylwh_to_xyxy(bbox_xtylwh,dataset_img.img_dims) for bbox_xtylwh in bboxes_xtylwh ]
identities = outputs[:, -2]
detection_idxs = outputs[:, -1]
draw_img = draw_bboxes(dataset_img.img, bboxes_xyxy, identities)
for detection_idx, person_id, bbox in zip(detection_idxs,identities,bboxes_xyxy):
print('%d,%d,%d,%d,%d,%d,%d,%d' % (
dataset_img.frame_no_cam, dataset_img.cam_id, person_id, detection_idx,int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])), file=self.track_results_file)
if self.cfg.general.display_viewer:
cv2.imshow("Annotation Viewer", draw_img)
cv2.waitKey(1)
def run_on_cam_images(self,cam_iterator):
timer = Timer()
for image in cam_iterator:
self.pbar_tracker.update()
timer.tic()
if self.use_original_wda:
self.img_callback_original_wda(image)
else:
self.img_callback(image)
timer.toc()
return timer.average_time, timer.calls
@staticmethod
def get_cam_iterator_len_sum(cam_image_iterators):
overall_len = 0
for cam_iterator in cam_image_iterators:
overall_len += len(cam_iterator)
return overall_len
def get_track_results_path(self,cam_id):
tracker_results_folder = os.path.join(self.cfg.general.config_run_path,"tracker_results")
os.makedirs(tracker_results_folder,exist_ok=True)
return os.path.join(tracker_results_folder,"track_results_{}.txt".format(cam_id))
def run_on_dataset(self):
logger = logging.getLogger("mtmct")
logger.info("Starting tracking on dataset.")
cam_iterators = get_cam_iterators(self.cfg, self.cfg.data.source.cam_ids)
frame_count_all_cams = self.get_cam_iterator_len_sum(cam_iterators)
self.pbar_tracker = tqdm(total=frame_count_all_cams)
timer_avgs, timer_calls = [], []
for cam_iterator in cam_iterators:
logger.info("Processing cam {}".format(cam_iterator.cam_id))
self.track_results_path = self.get_track_results_path(cam_iterator.cam_id)
logger.info(self.track_results_path)
self.track_results_file = open(self.track_results_path, 'w')
print("frame_no_cam,cam_id,person_id,detection_idx,xtl,ytl,xbr,ybr", file=self.track_results_file)
if self.use_original_wda:
self.load_detections(cam_iterator)
ta, tc = self.run_on_cam_images(cam_iterator)
timer_avgs.append(ta)
timer_calls.append(tc)
self.track_results_file.close()
if self.use_original_wda:
self.save_detections()
track_count_string = count_tracks(self.track_results_path)
logger.info(track_count_string)
timer_avgs = np.asarray(timer_avgs)
timer_calls = np.asarray(timer_calls)
all_time = | np.dot(timer_avgs, timer_calls) | numpy.dot |
import pytest
import numpy as np
import pandas as pd
import geopandas
from shapely import geometry
from disarm_gears.frames import TilePattern
from disarm_gears.util.buffers import voronoi_polygons
# Inputs
n_points = 10
g_points = np.random.uniform(0, 1, n_points * 2).reshape(n_points, -1)
vor = voronoi_polygons(g_points)
b_attrib = np.random.random(25)
g_attrib_1 = np.random.random(10)
g_attrib_2 = np.random.random(40).reshape(10, -1)
g_attrib_3 = pd.DataFrame({li: ci for li,ci in zip(['a', 'b', 'c', 'd'], g_attrib_2.T)})
X = np.vstack([g_points.copy()[5:], np.array([10, 10])])
B = geopandas.GeoDataFrame({'id': [0], 'geometry': [geometry.Polygon(((0.2, 0.3), (0.2, 0.8),
(0.7, 0.8), (0.2, 0.3)))]})
B2 = geopandas.GeoDataFrame({'id': [0, 1], 'geometry': [geometry.Polygon(((0.2, 0.3), (0.2, 0.8),
(0.7, 0.8), (0.2, 0.3))),
geometry.Polygon(((0.2, 0.3), (0.7, 0.3),
(0.7, 0.8), (0.2, 0.3)))]})
# Demo object used repeatedly
sf_0 = TilePattern(geometries=vor.geometry, attributes=None, crs=None)
def test_inputs():
# Check bad inputs
with pytest.raises(AssertionError):
TilePattern(geometries=g_points)
with pytest.raises(NotImplementedError):
TilePattern(geometries=vor.geometry, attributes=None, crs=0)
def test_outputs():
# Check output types
assert isinstance(sf_0.region, geopandas.GeoDataFrame)
assert isinstance(sf_0.centroids, np.ndarray)
# Check sf.region shape
    assert sf_0.region.ndim == 2
    assert sf_0.region.shape[0] == n_points
    assert sf_0.region.shape[1] == 1
    # Check sf.region.columns
    assert 'geometry' in sf_0.region.columns
    # Check box type
    assert isinstance(sf_0.box, pd.DataFrame)
    assert sf_0.box.ndim == 2
    assert sf_0.box.shape[0] == 2
    assert sf_0.box.shape[1] == 2
def test_attributes_array():
assert sf_0.attributes_array() is None
_attr = np.random.uniform(0, 100, g_points.size).reshape(-1, 2)
sf_1 = TilePattern(geometries=vor.geometry, attributes=_attr, crs=None)
sf_attr = sf_1.attributes_array()
assert isinstance(sf_attr, np.ndarray)
assert sf_attr.shape[0] == _attr.shape[0]
assert sf_attr.shape[1] == _attr.shape[1]
def test_set_boundary():
sf_1 = TilePattern(geometries=vor.geometry, attributes=None, crs=None)
with pytest.raises(AssertionError):
sf_1.set_boundary(B=B.geometry[0])
sf_1.set_boundary(B=B)
assert isinstance(sf_1.boundary, geopandas.GeoDataFrame)
sf_2 = TilePattern(geometries=vor.geometry, attributes=None, crs=None)
sf_2.set_boundary(B2)
assert sf_1.region.shape[0] < sf_2.region.shape[0]
def test_locate():
ix = sf_0.locate(X=X)
assert isinstance(ix, np.ndarray)
assert ix.ndim == 1
assert ix.size == X.shape[0]
assert ix[-1] == -1
assert np.all(ix[:-1] - np.arange(5, 10) == 0)
def test_points_to_frame():
nx = X.shape[0]
summary = sf_0.points_to_frame(X=X, group_by=None)
assert isinstance(summary, pd.DataFrame)
assert summary.ndim == 2
assert summary.shape[0] == nx - 1
assert summary['var_0'].sum() == nx - 1
assert np.unique(summary['tile']).size == nx - 1
with pytest.raises(AssertionError):
sf_0.points_to_frame(X=np.vstack([X] * 3),
group_by=[['A'] * 2 * nx + ['B'] * nx])
summary = sf_0.points_to_frame(X=np.vstack([X] * 3),
group_by=np.hstack([['A'] * 2 * nx + ['B'] * nx]))
assert summary.shape[0] == 2 * (nx - 1)
assert 'var_0' in summary.columns
assert | np.all(summary.loc[summary.group == 'A', 'var_0'] == 2) | numpy.all |
import numpy as np
import pandas as pd
from scipy.stats import norm, percentileofscore
from tqdm.notebook import tqdm
def rv_cc_estimator(sample,n=22):
"""
Realized volatility close to close calculation. Returns a time series of the realized volatility.
sample: series or dataframe of closing prices indexed by date
n: sample size period for the volatility
"""
sample_clean = sample.dropna()
returns = np.divide(sample_clean, sample_clean.shift(1))
log_returns = np.log(returns)
ann_log_returns = 252*np.power(log_returns,2)/n
return 100 * np.sqrt(ann_log_returns.rolling(window=n,min_periods=n).sum())
def cc_estimator(sample,n=22,days=1):
combined_rv = pd.Series()
sample_clean = sample.dropna()
for i in range(days):
staggered_samples = sample_clean[i::days]
returns = np.divide(staggered_samples, staggered_samples.shift(1))
log_returns = np.log(returns)
ann_log_returns = 252*np.power(log_returns,2)/n/days
sample_rv = 100 * np.sqrt(ann_log_returns.rolling(window=n,min_periods=n).sum())
combined_rv = pd.concat([combined_rv, sample_rv])
return combined_rv.sort_index()
def calc_period_var(sample, return_period=22, lookback=66):
"""
A period return's move normalized. Calculated as the squared move (variance) scaled by the period
sample: series or dataframe of closing prices indexed by date
"""
sample_clean = sample.dropna()
lookback_ret = sample_clean.pct_change(periods=return_period)
return (lookback_ret**2).rolling(window=lookback).mean() * 250 / return_period
def calc_var_ratio(sample, return_period=22, period_min=3, day_min=66):
"""
The variance ratio based on the normalized historical returns over a given rolling return period ratioed to the daily historical returns
sample: series or dataframe of closing prices indexed by date
return period:
"""
lookback = max(return_period * period_min, day_min)
period_var = calc_period_var(sample, return_period=return_period, lookback=lookback)
daily_var = calc_period_var(sample, return_period=1, lookback=lookback)
return period_var / daily_var
def calc_lfev(sample, return_period=22, period_min=3, day_min=66):
lookback = max(return_period * period_min, day_min)
period_var = calc_period_var(sample, return_period=return_period, lookback=lookback)
daily_var = calc_period_var(sample, return_period=1, lookback=lookback)
return (np.sqrt(period_var) - np.sqrt(daily_var)) * 100
def move_volatility(prices, days=66):
abs_move = (prices / prices.shift(days) - 1)
high_low = (prices.rolling(days+1).max() - prices.rolling(days+1).min()) / prices.shift(days)
return abs_move / high_low * np.abs(abs_move) * 100
def move_volatility_range(prices, days=66):
abs_move = (prices / prices.shift(days) - 1)
high_prices = prices.rolling(days+1).max()
low_prices = prices.rolling(days+1).min()
close_dist_high_low = ((high_prices - prices.shift(days)) + (low_prices - prices.shift(days))) / prices.shift(days)
high_low = (high_prices - low_prices) / prices.shift(days)
return close_dist_high_low * (0.5 * (np.abs(abs_move) + high_low)) / high_low * 100
def generate_returns_dict(prices, undl_list, return_days):
returns = {}
for u in undl_list:
returns[u] = pd.DataFrame()
for i in return_days:
close_prices = prices[u, 'Close'].dropna()
returns[u][i] = (close_prices / close_prices.shift(i) - 1) * 100
return returns
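# Illustrative note (assumption, matching the prices[u, 'Close'] access above):
# `prices` is a DataFrame with an (underlying, field) column MultiIndex, e.g.
#   returns = generate_returns_dict(prices, ['SPX'], return_days=[5, 22, 66])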
def rolling_trend(prices, undl_list, return_days, smoothing=5):
'''Determines the trend by blending the returns across different periods and smooths the results.'''
avg_returns_dict = {}
returns_summary = {}
returns_dict = generate_returns_dict(prices, undl_list, return_days)
for u in undl_list:
avg_returns_dict[u] = pd.DataFrame()
for i in return_days:
avg_returns_dict[u]['{}D Trend'.format(i)] = returns_dict[u][i].dropna().rolling(smoothing).mean() / np.sqrt(i)
avg_returns_dict[u]['Average Trend'] = avg_returns_dict[u].dropna().mean(axis=1)
if len(avg_returns_dict[u].dropna()) > 0:
returns_summary[u] = avg_returns_dict[u]['Average Trend'].dropna()[-1]
returns_summary = pd.Series(returns_summary)
return returns_summary, avg_returns_dict
def spot_stats(sample, n=260):
"""
    Simple spot statistics: the % distance from the last spot to the period high,
    the % distance to the period low, and the percentile rank of the current spot
    within the window.
sample: series or dataframe of closing prices
n: historical lookback period.
"""
spot_window = sample.dropna()[-n:]
percentile = percentileofscore(spot_window, spot_window[-1])
high = spot_window.max()
low = spot_window.min()
max_pct = (high / spot_window[-1] - 1) * 100
min_pct = (low / spot_window[-1] - 1) * 100
return max_pct, min_pct, percentile
def past_spot_ranges(sample, n=22, haircut=0.2, intraday=True):
    '''Returns the past n-day spot range based on the max/min of the period.'''
if intraday:
sample_max = (sample['High'].rolling(n).max() / sample['Close'].shift(n) - 1) * 100
sample_min = (sample['Low'].rolling(n).min() / sample['Close'].shift(n) - 1) * 100
else:
sample_max = (sample['Close'].rolling(n).max() / sample['Close'].shift(n) - 1) * 100
sample_min = (sample['Close'].rolling(n).min() / sample['Close'].shift(n) - 1) * 100
delta_scale = 1 - haircut # Set a more conservative estimate of the range.
return pd.concat([abs(sample_max) * delta_scale, abs(sample_min) * delta_scale], axis=1).max(axis=1)
def past_abs_returns(sample, n=5):
return np.abs((1 - sample['Close'].shift(n) / sample['Close']) * 100)
def varvolbreakeven(var, vol):
b = -1
a = 1 / (2 * var)
c = vol - var / 2
breakeven1 = (-b + np.sqrt(b**2 - 4 * a * c)) / (2 * a)
breakeven2 = (-b - np.sqrt(b**2 - 4 * a * c)) / (2 * a)
return breakeven1, breakeven2
def zscore_calc(hist_data, live_data):
return (live_data - hist_data.mean()) / hist_data.std()
def var_payout(realized,strike):
return 0.5 * (realized**2 / strike - strike)
def vol_payout(realized, strike):
return realized - strike
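# Worked example (illustrative numbers): with a variance strike of 20 and a vol
# strike of 19, the breakevens solve 0.5*(r**2/20 - 20) == r - 19 for realized r.
def _example_varvol_breakeven():
    b1, b2 = varvolbreakeven(20.0, 19.0)
    assert np.isclose(var_payout(b1, 20.0), vol_payout(b1, 19.0))
    return b1, b2  # approximately 26.3 and 13.7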
class BlackScholes:
def __init__(self, s, k, r, q, vol, t, payoff):
"""vol is expressed in %. eg enter 16v as 16, not 0.16"""
self.s = s
self.k = k
self.r = r
self.q = q
self.vol = vol / 100
self.t = t
self.payoff = payoff
def d1(self):
return (np.log(self.s / self.k) +
(self.r - self.q + self.vol ** 2 / 2) * \
self.t) / (self.vol * np.sqrt(self.t))
def d2(self):
return (np.log(self.s / self.k) +
(self.r - self.q - self.vol ** 2 / 2) * \
self.t) / (self.vol * np.sqrt(self.t))
def phi(self, x):
return np.exp(-x ** 2 / 2) / np.sqrt(2 * np.pi)
def price(self):
if self.payoff.lower() == 'put':
return self.put_price()
else:
return self.call_price()
def call_price(self):
if self.t == 0:
return 0
        return self.s * np.exp(-self.q * self.t) * norm.cdf(self.d1()) - np.exp(-self.r * self.t) * self.k * norm.cdf(self.d2())
def put_price(self):
if self.t == 0:
return 0
        return np.exp(-self.r * self.t) * self.k * norm.cdf(-self.d2()) - self.s * np.exp(-self.q * self.t) * norm.cdf(-self.d1())
def delta(self):
if self.t == 0:
return 0
if self.payoff.lower() == 'put':
            return -np.exp(-self.q * self.t) * norm.cdf(-self.d1())
else:
            return np.exp(-self.q * self.t) * norm.cdf(self.d1())
def vega(self):
if self.t == 0:
return 0
        return self.s * np.exp(-self.q * self.t) * self.phi(self.d1()) * np.sqrt(self.t)
def alt_vega(self):
return self.k * np.exp(-self.r * self.t) * self.phi(self.d2()) * | np.sqrt(self.t) | numpy.sqrt |
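# Usage sketch (illustrative parameters): a 3-month ATM call at 16 vol.
def _example_black_scholes():
    opt = BlackScholes(s=100.0, k=100.0, r=0.01, q=0.0, vol=16.0, t=0.25,
                       payoff='call')
    return opt.price(), opt.delta(), opt.vega()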
import numpy as np
import scipy.signal as sps
from sklearn.preprocessing import MinMaxScaler, StandardScaler
def resample_data(gsrdata, prevSR, newSR):
    '''Resamples the data to a new sample rate
    Function to resample the passed gsr data from prevSR to newSR
Parameters
----------
gsrdata : 1-d array
array containing the gsr data
prevSR : int or float
the previous sample rate of the data
newSR : int or float
the new sample rate of the data
Returns
-------
data : 1-d array
array containing the resampled data
'''
number_of_samples = int(round(len(gsrdata) * float(newSR) / prevSR))
data = sps.resample(gsrdata, number_of_samples)
return data
def normalization(gsrdata):
'''Min Max normalization
Function to calculate normalized gsr data
Parameters
----------
gsrdata : 1-d array
array containing the gsr data
Returns
-------
n_gsrdata : 1-d array
normalized gsr data
'''
gsrdata = gsrdata - (np.min(gsrdata))
gsrdata /= (np.max(gsrdata) - np.min(gsrdata))
n_gsrdata = gsrdata
return n_gsrdata
def rolling_mean(data, windowsize, sample_rate):
'''calculates rolling mean
Function to calculate moving average over the passed data
Parameters
----------
data : 1-d array
array containing the gsr data
windowsize : int or float
the moving average window size in seconds
sample_rate : int or float
the sample rate of the data set
Returns
-------
rol_mean : 1-d array
array containing computed rolling mean
'''
avg_hr = (np.mean(data))
data_arr = np.array(data)
t_windowsize = int(windowsize*sample_rate)
t_shape = data_arr.shape[:-1] + (data_arr.shape[-1] - t_windowsize + 1, t_windowsize)
t_strides = data_arr.strides + (data_arr.strides[-1],)
sep_win = np.lib.stride_tricks.as_strided(data_arr, shape=t_shape, strides=t_strides)
rol_mean = np.mean(sep_win, axis=1)
missing_vals = np.array([avg_hr for i in range(0, int(abs(len(data_arr) - len(rol_mean))/2))])
rol_mean = np.insert(rol_mean, 0, missing_vals)
rol_mean = np.append(rol_mean, missing_vals)
#only to catch length errors that sometimes unexplicably occur.
##Generally not executed, excluded from testing and coverage
if len(rol_mean) != len(data): # pragma: no cover
lendiff = len(rol_mean) - len(data)
if lendiff < 0:
rol_mean = np.append(rol_mean, 0)
else:
rol_mean = rol_mean[:-1]
return rol_mean
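# Illustrative only: a 0.75 s moving-average window at 4 Hz spans 3 samples.
def _example_rolling_mean():
    noisy = np.sin(np.linspace(0., 10., 100)) + np.random.normal(0., 0.1, 100)
    return rolling_mean(noisy, windowsize=0.75, sample_rate=4)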
def min_max_scale(data):
"""
Min-Max scale the data in the range [-1.0, 1.0]
The data is expected to have the shape (samples, length, channels)
Return the scaled data in the original shape.
"""
_, segment_length, n_channels = data.shape
# flatten the data
features = data.reshape(-1, segment_length * n_channels)
# scale the data
scaler = MinMaxScaler(feature_range=(-1.0, 1.0))
features = scaler.fit_transform(features)
    # reshape the data back to (samples, length, channels); the flattened rows
    # are laid out as (length, channels), so reshape directly to that order
    features = features.reshape(-1, segment_length, n_channels)
return features
def standard_scaler(data):
""" Normalize the data to have zero mean and unit standard devication
The data is expected to have the shape (n_samples, segment_length, n_channels)
Return the scaled data in the original shape.
"""
_, segment_length, n_channels = data.shape
# flatten the data
features = data.reshape(-1, segment_length * n_channels)
# scale the data
    scaler = StandardScaler()  # defaults center and scale, matching the docstring
features = scaler.fit_transform(features)
    # reshape the data back to (n_samples, segment_length, n_channels)
    features = features.reshape(-1, segment_length, n_channels)
    return features
import numpy as np
import os
import requests
from matplotlib import pyplot as plt
from matplotlib import cm
from lmfit.models import Model
from sklearn.cluster import KMeans
from shapely.geometry import Polygon
from radio_beam.commonbeam import getMinVolEllipse
from scipy import ndimage as ndi
from scipy.spatial import distance
from skimage import io
from skimage.measure import EllipseModel
from skimage.color import rgb2gray
from skimage import filters, util
from skimage.morphology import disk, skeletonize, ball
from skimage.measure import approximate_polygon
from skimage import transform
from PIL import Image, ImageDraw, ImageFilter, ImageOps
from sklearn.linear_model import LinearRegression
from scipy import ndimage
import copy
import cv2
from scipy.spatial import ConvexHull
import sys
import logging
import time
import glob
from logging import StreamHandler, Formatter
from src.cfg import CfgAnglesNames, CfgBeamsNames, CfgDataset
handler = StreamHandler(stream=sys.stdout)
handler.setFormatter(Formatter(fmt='[%(asctime)s: %(levelname)s] %(message)s'))
logger = logging.getLogger(__name__)
logger.addHandler(handler)
file_path = os.getcwd() + '/utils.py'
class grainPreprocess():
@classmethod
def imdivide(cls, image: np.ndarray, h: int, side: str) -> np.ndarray:
"""
        :param image: ndarray (height, width), grayscale image
        :param h: int scalar, number of bottom rows to crop
        :param side: str, 'left' or 'right'
        :return: ndarray (height - h, width/2)
"""
        #
        # returns the left or right half of the input image
        #
height, width = image.shape
sides = {'left': 0, 'right': 1}
shapes = [(0, height - h, 0, width // 2), (0, height - h, width // 2, width)]
shape = shapes[sides[side]]
return image[shape[0]:shape[1], shape[2]:shape[3]]
@classmethod
def combine(cls, image: np.ndarray, h: int, k=0.5) -> np.ndarray:
"""
        :param image: ndarray (height, width), grayscale image
        :param h: int scalar, number of bottom rows to crop
        :param k: float scalar, blend weight of the left half
        :return: ndarray (height - h, width/2)
"""
        #
        # overlays the left and right halves of the image
        # if k=1 the output is the left half; if k=0, the right half
        #
left_img = cls.imdivide(image, h, 'left')
right_img = cls.imdivide(image, h, 'right')
l = k
r = 1 - l
gray = np.array(left_img) * l
gray += np.array(right_img) * r
return gray.astype('uint8')
@classmethod
def do_otsu(cls, img: np.ndarray) -> np.ndarray:
"""
:param img: ndarray (height,width,channels)
:return: ndarray (height,width), Boolean
"""
        #
        # Otsu binarization
        #
global_thresh = filters.threshold_otsu(img)
binary_global = img > global_thresh
return binary_global.astype('uint8')
@classmethod
def image_preprocess(cls, image: np.ndarray) -> np.ndarray:
"""
:param image: ndarray (height,width,channels)
:return: ndarray (height,width)
"""
        #
        # combination of a median filter, binarization and a gradient
        # grain pixels have value 0, binder regions 127, and their boundaries 254
        #
unsigned_image = util.img_as_ubyte(image)
denoised = filters.rank.median(unsigned_image, ball(3))
binary = cls.do_otsu(denoised)
grad = abs(filters.rank.gradient(binary, ball(1)))
bin_grad = (1 - binary + grad) * 127
return bin_grad.astype(np.uint8)
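    # Usage sketch (hypothetical path; the ball() structuring elements above
    # suggest volumetric data):
    #   volume = io.imread('grains.tif')
    #   labels = grainPreprocess.image_preprocess(volume)
    # yields the three levels 0 (grains), 127 (binder), 254 (boundaries).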
@classmethod
def image_preprocess_kmeans(cls, image: np.ndarray, h=135, k=1, n_clusters=3, pos=1) -> np.ndarray:
"""
:param image: array (height,width,channels)
:param h: int scalar
:param k: float scalar
:param n_clusters: int scalar
:param pos: int scalar, cluster index
:return: ndarray (height,width)
"""
        #
        # boundary extraction by means of clustering,
        # with the noise evened out by a median filter
        # pos selects the cluster that will be shown in the returned image
        #
combined = cls.combine(image, h, k)
clustered, colors = grainMorphology.kmeans_image(combined, n_clusters)
cluster = clustered == colors[pos]
cluster = np.array(cluster * 255, dtype='uint8')
new_image = filters.median(cluster, disk(2))
return new_image
@classmethod
def read_preprocess_data(cls, images_dir, max_images_num_per_class=100, preprocess=False, save=False,
crop_bottom=False,
h=135, resize=True, resize_shape=None,
save_name='all_images.npy'):
folders_names = glob.glob(images_dir + '*')
images_paths = [glob.glob(folder_name + '/*')[:max_images_num_per_class] for folder_name in folders_names]
l = np.array(images_paths).flatten().shape[0]
# Initial call to print 0% progress
GrainLogs.printProgressBar(0, l, prefix='Progress:', suffix='Complete', length=50)
preproc_images = []
start_time = time.time()
step = 0
for i, images_list_paths in enumerate(images_paths):
preproc_images.append([])
for image_path in images_list_paths:
step += 1
image = io.imread(image_path).astype(np.uint8)
                # crops the bottom strip of the photo containing the scale bar etc.
if crop_bottom:
image = grainPreprocess.combine(image, h)
                # resizes the image
if resize:
if resize_shape is not None:
image = transform.resize(image, resize_shape)
else:
print('No resize shape')
                # applies the filters sequentially (median, Otsu, Sobel, etc.)
if preprocess:
image = grainPreprocess.image_preprocess(image)
end_time = time.time()
eta = round((end_time - start_time) * (l - step), 1)
GrainLogs.printProgressBar(step, l, eta=eta, prefix='Progress:', suffix='Complete', length=50)
start_time = time.time()
preproc_images[i].append(image)
if save:
| np.save(save_name, preproc_images) | numpy.save |
"""HOOMD snapshot format."""
import operator
from collections import namedtuple
import numpy as np
import packaging.version
import parmed as pmd
from mbuild.box import Box
from mbuild.compound import Compound, Particle
from mbuild.utils.geometry import coord_shift
from mbuild.utils.io import import_
from mbuild.utils.sorting import natural_sort
hoomd = import_("hoomd")
__all__ = ["to_hoomdsnapshot", "from_snapshot"]
def _get_hoomd_version():
if "version" in dir(hoomd):
return packaging.version.parse(hoomd.version.version)
else:
return packaging.version.parse(hoomd.__version__)
def from_snapshot(snapshot, scale=1.0):
"""Convert a Snapshot to a Compound.
Snapshot can be a hoomd.Snapshot or a gsd.hoomd.Snapshot.
Parameters
----------
snapshot : hoomd.Snapshot or gsd.hoomd.Snapshot
Snapshot from which to build the mbuild Compound.
scale : float, optional, default 1.0
Value by which to scale the length values
Returns
-------
comp : Compound
Note
----
GSD and HOOMD snapshots center their boxes on the origin (0,0,0), so the
compound is shifted by half the box lengths
"""
comp = Compound()
bond_array = snapshot.bonds.group
n_atoms = snapshot.particles.N
if "SnapshotSystemData_float" in dir(hoomd._hoomd) and isinstance(
snapshot, hoomd._hoomd.SnapshotSystemData_float
):
# hoomd v2
box = snapshot.box
comp.box = Box.from_lengths_tilt_factors(
lengths=np.array([box.Lx, box.Ly, box.Lz]) * scale,
tilt_factors=np.array([box.xy, box.xz, box.yz]),
)
else:
# gsd / hoomd v3
box = np.asarray(snapshot.configuration.box)
comp.box = Box.from_lengths_tilt_factors(
lengths=box[:3] * scale, tilt_factors=box[3:]
)
# GSD and HOOMD snapshots center their boxes on the origin (0,0,0)
shift = np.array(comp.box.lengths) / 2
# Add particles
for i in range(n_atoms):
name = snapshot.particles.types[snapshot.particles.typeid[i]]
xyz = snapshot.particles.position[i] * scale + shift
charge = snapshot.particles.charge[i]
atom = Particle(name=name, pos=xyz, charge=charge)
comp.add(atom, label=str(i))
# Add bonds
particle_dict = {idx: p for idx, p in enumerate(comp.particles())}
for i in range(bond_array.shape[0]):
atom1 = int(bond_array[i][0])
atom2 = int(bond_array[i][1])
comp.add_bond([particle_dict[atom1], particle_dict[atom2]])
return comp
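# Usage sketch (hypothetical trajectory file):
#   import gsd.hoomd
#   with gsd.hoomd.open('traj.gsd') as traj:
#       comp = from_snapshot(traj[-1], scale=0.1)  # e.g. scale lengths by 0.1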
def to_hoomdsnapshot(
structure,
ref_distance=1.0,
ref_mass=1.0,
ref_energy=1.0,
rigid_bodies=None,
shift_coords=True,
write_special_pairs=True,
auto_scale=False,
parmed_kwargs={},
hoomd_snapshot=None,
):
"""Convert a Compound or parmed.Structure to hoomd.Snapshot.
Parameters
----------
structure : parmed.Structure
ParmEd Structure object
ref_distance : float, optional, default=1.0
Reference distance for conversion to reduced units
ref_mass : float, optional, default=1.0
Reference mass for conversion to reduced units
ref_energy : float, optional, default=1.0
Reference energy for conversion to reduced units
rigid_bodies : list of int, optional, default=None
List of rigid body information. An integer value is required for
each atom corresponding to the index of the rigid body the particle
is to be associated with. A value of None indicates the atom is not
part of a rigid body.
shift_coords : bool, optional, default=True
Shift coordinates from (0, L) to (-L/2, L/2) if necessary.
auto_scale : bool, optional, default=False
Automatically use largest sigma value as ref_distance,
largest mass value as ref_mass
and largest epsilon value as ref_energy
write_special_pairs : bool, optional, default=True
Writes out special pair information necessary to correctly use
the OPLS fudged 1,4 interactions in HOOMD.
hoomd_snapshot : hoomd.Snapshot, optional, default=None
Initial snapshot to which to add the ParmEd structure object.
The box information of the initial snapshot will be overwritten.
(useful for rigid bodies)
Returns
-------
hoomd_snapshot : hoomd.Snapshot
ReferenceValues : namedtuple
Values used in scaling
Notes
-----
Force field parameters are not written to the hoomd_snapshot
"""
if not isinstance(structure, (Compound, pmd.Structure)):
raise ValueError(
"You are trying to create a hoomd.Snapshot from a "
f"{type(structure)} please pass a Compound or pmd.Structure"
)
elif isinstance(structure, Compound):
structure = structure.to_parmed(**parmed_kwargs)
hoomd_version = _get_hoomd_version()
if hoomd_version.major == 2 and not hoomd.context.current:
hoomd.context.initialize("")
if auto_scale:
ref_mass = max([atom.mass for atom in structure.atoms])
pair_coeffs = list(
set(
(atom.type, atom.epsilon, atom.sigma)
for atom in structure.atoms
)
)
ref_energy = max(pair_coeffs, key=operator.itemgetter(1))[1]
ref_distance = max(pair_coeffs, key=operator.itemgetter(2))[2]
ReferenceValues = namedtuple("ref_values", ["distance", "mass", "energy"])
ref_values = ReferenceValues(ref_distance, ref_mass, ref_energy)
xyz = np.array([[atom.xx, atom.xy, atom.xz] for atom in structure.atoms])
if shift_coords:
xyz = coord_shift(xyz, structure.box[:3])
# Get box information
if np.allclose(structure.box[3:6], np.array([90, 90, 90])):
lx, ly, lz = structure.box[:3] / ref_distance
xy, xz, yz = 0, 0, 0
else:
a, b, c = structure.box[0:3] / ref_distance
alpha, beta, gamma = np.radians(structure.box[3:6])
lx = a
xy = b * np.cos(gamma)
xz = c * np.cos(beta)
ly = | np.sqrt(b ** 2 - xy ** 2) | numpy.sqrt |
"""PISA data container"""
from __future__ import absolute_import, division, print_function
import argparse
from collections.abc import Mapping, Iterable, Sequence
from collections import OrderedDict
import copy
import numpy as np
from pisa import FTYPE
from pisa.core.binning import OneDimBinning, MultiDimBinning
from pisa.utils.fileio import from_file
from pisa.utils.log import logging
__all__ = [
"NU_FLAVORS",
"NU_INTERACTIONS",
"OUTPUT_NUFLAVINT_KEYS",
"LEGACY_FLAVKEY_XLATION",
"EventsPi",
"split_nu_events_by_flavor_and_interaction",
"fix_oppo_flux",
"main",
]
__author__ = "<NAME>"
__license__ = """Copyright (c) 2014-2018, The IceCube Collaboration
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
# Define the flavors and interactions for neutrino events
NU_FLAVORS = OrderedDict(
nue=12, nuebar=-12, numu=14, numubar=-14, nutau=16, nutaubar=-16
)
NU_INTERACTIONS = OrderedDict(cc=1, nc=2)
OUTPUT_NUFLAVINT_KEYS = tuple(
"%s_%s" % (fk, ik)
for fk, fc in NU_FLAVORS.items()
for ik, ic in NU_INTERACTIONS.items()
)
LEGACY_FLAVKEY_XLATION = dict(
nue="nue",
nuebar="nuebar",
nue_bar="nuebar",
numu="numu",
numubar="numubar",
numu_bar="numubar",
nutau="nutau",
nutaubar="nutaubar",
nutau_bar="nutaubar",
)
# Backwards compatibility fixes
OPPO_FLUX_LEGACY_FIX_MAPPING_NU = {
"nominal_nue_flux" : "neutrino_nue_flux",
"nominal_numu_flux" : "neutrino_numu_flux",
"nominal_nuebar_flux" : "neutrino_oppo_nue_flux",
"nominal_numubar_flux" : "neutrino_oppo_numu_flux",
}
OPPO_FLUX_LEGACY_FIX_MAPPING_NUBAR = {
"nominal_nue_flux" : "neutrino_oppo_nue_flux",
"nominal_numu_flux" : "neutrino_oppo_numu_flux",
"nominal_nuebar_flux" : "neutrino_nue_flux",
"nominal_numubar_flux" : "neutrino_numu_flux",
}
def append_arrays_dict(key, val, sdict):
'''
Helper function for appending multiple dicts of arrays (e.g. from
multiple input files) into a single dict of arrays
'''
if isinstance(val, Mapping):
# Handle sub-dict
for key2, val2 in val.items() :
if key not in sdict :
sdict[key] = OrderedDict()
append_arrays_dict(key2, val2, sdict[key])
else :
# Have now reached a variable
assert isinstance(val, np.ndarray), "'%s' is not an array, is a %s" % (key, type(val))
if key in sdict :
sdict[key] = np.append(sdict[key], val)
else :
sdict[key] = val
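# Illustrative only (not part of the original module): merging two files'
# nested dicts of arrays into a single dict of concatenated arrays.
def _example_append_arrays_dict():
    merged = OrderedDict()
    for file_data in ({"nu": {"energy": np.array([1., 2.])}},
                      {"nu": {"energy": np.array([3.])}}):
        for key, val in file_data.items():
            append_arrays_dict(key, val, merged)
    return merged  # merged["nu"]["energy"] -> array([1., 2., 3.])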
class EventsPi(OrderedDict):
"""
Container for events for use with PISA pi
Parameters
----------
name : string, optional
Name to identify events
neutrinos : bool, optional
Flag indicating if events represent neutrinos; toggles special
behavior such as splitting into nu/nubar and CC/NC. Default is True.
fraction_events_to_keep : float
Fraction of loaded events to use (use to downsample).
Must be in range [0.,1.], or disable by setting to `None`.
        Default is None.
*args, **kwargs
Passed on to `__init__` method of OrderedDict
"""
def __init__(
self,
*args,
name=None,
neutrinos=True,
fraction_events_to_keep=None,
events_subsample_index=0,
**kwargs
):
super().__init__(*args, **kwargs)
self.name = name
self.neutrinos = neutrinos
self.fraction_events_to_keep = fraction_events_to_keep
self.events_subsample_index = events_subsample_index
# Checks for down-sampling inputs
if self.fraction_events_to_keep is not None:
            # Check `fraction_events_to_keep` value is in the required range
self.fraction_events_to_keep = float(self.fraction_events_to_keep)
assert (self.fraction_events_to_keep >= 0.) and (self.fraction_events_to_keep <= 1.), "`fraction_events_to_keep` must be in range [0.,1.], or None to disable"
# Check `fraction_events_to_keep` and `events_subsample_index` values are compatible
assert isinstance(self.events_subsample_index, int), f"`events_subsample_index` must be an integer"
assert self.events_subsample_index >= 0, f"`events_subsample_index` = {self.events_subsample_index}, but must be >= 0"
max_index = int(np.floor( 1. / self.fraction_events_to_keep )) - 1
assert self.events_subsample_index <= max_index, f"`events_subsample_index` = {self.events_subsample_index} is too large given `fraction_events_to_keep` = {self.fraction_events_to_keep} (max is {max_index})"
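            # e.g. fraction_events_to_keep=0.25 defines four disjoint subsamples,
            # selectable via events_subsample_index in {0, 1, 2, 3}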
# Define some metadata
#TODO Is this out of date?
self.metadata = OrderedDict(
[
("detector", ""),
("geom", ""),
("runs", []),
("proc_ver", ""),
("cuts", []),
]
)
def load_events_file(self, events_file, variable_mapping=None, required_metadata=None, seed=123456):
"""Fill this events container from an input HDF5 file filled with event
data Optionally can provide a variable mapping so select a subset of
variables, rename them, etc.
Parameters
----------
events_file : string or mapping
If string, interpret as a path and load file at that path; the
loaded object should be a mapping. If already a mapping, take and
interpret events from that.
variable_mapping : mapping, optional
If specified, should be a mapping where the keys are the
destination variable names and the items are either the source
variable names or an iterable of source variables names. In the
latter case, each of the specified source variables will become a
column vector in the destination array.
required_metadata : None, or list of str
Can optionally specify metadata keys to parse from the input file metdata.
ONLY metadata specified here will be parsed.
Anything specified here MUST exist in the files.
"""
# Validate `events_file`
if not isinstance(events_file, (str, Mapping, Sequence)):
raise TypeError(
"`events_file` must be either string or mapping; got (%s)"
% type(events_file)
)
# Validate `variable_mapping`
if variable_mapping is not None:
if not isinstance(variable_mapping, Mapping):
raise TypeError("'variable_mapping' must be a mapping (e.g., dict)")
for dst, src in variable_mapping.items():
if not isinstance(dst, str):
raise TypeError("`variable_mapping` 'dst' (key) must be a string")
if isinstance(src, str):
pass # Nothing to do
elif isinstance(src, Iterable):
for v in src:
if not isinstance(v, str):
raise TypeError(
"`variable_mapping` 'src' (value) has at least"
" one element that is not a string"
)
else:
raise TypeError(
"`variable_mapping` 'src' (value) must be a string or"
" an iterable of strings"
)
# Validate `required_metadata`
if required_metadata is not None :
assert isinstance(required_metadata, Sequence)
assert all([ isinstance(k, str) for k in required_metadata ])
# Reporting
if self.fraction_events_to_keep is not None :
logging.info("Down-sampling events (keeping %0.2g%% of the total). Will take sub-sample %i." % (100.*self.fraction_events_to_keep, self.events_subsample_index))
#
# Loop over files
#
input_data = OrderedDict()
metadata = OrderedDict()
# Handle list of files vs single file
events_files_list = []
if isinstance(events_file, str):
events_files_list = [events_file]
elif isinstance(events_file, Mapping):
events_files_list = [events_file]
elif isinstance(events_file, Sequence):
events_files_list = events_file
# Loop over files
for i_file, infile in enumerate(events_files_list) :
#
# Parse variables from file
#
# Read the file
# If `variable_mapping` was specified, only load those variables (saves time/memory)
if isinstance(infile, str):
# If user provided a variable mapping, only load the requested variables.
# Remember to andle cases where the variable is defined as a list of variables in
# the cfg file.
if variable_mapping is None :
choose = None
else :
choose = []
for var_name in variable_mapping.values() :
if isinstance(var_name, str) :
choose.append(var_name)
elif isinstance(var_name, Sequence) :
for sub_var_name in var_name :
assert isinstance(sub_var_name, str), "Unknown variable format, must be `str`"
choose.append(sub_var_name)
else :
raise IOError("Unknown variable name format, must be `str` or list of `str`")
# Handle "oppo" flux backwards compatibility
# This means adding the old variable names into the chosen variable list
# The actual renaming is done later by `fix_oppo_flux`
if variable_mapping is not None :
for var_name in choose :
if var_name in OPPO_FLUX_LEGACY_FIX_MAPPING_NU :
choose.append( OPPO_FLUX_LEGACY_FIX_MAPPING_NU[var_name] )
if var_name in OPPO_FLUX_LEGACY_FIX_MAPPING_NUBAR :
choose.append( OPPO_FLUX_LEGACY_FIX_MAPPING_NUBAR[var_name] )
# Load the file
file_input_data = from_file(infile, choose=choose)
if not isinstance(file_input_data, Mapping):
raise TypeError(
'Contents loaded from "%s" must be a mapping; got: %s'
% (infile, type(file_input_data))
)
assert len(file_input_data) > 0, "No input data found"
# File already loaded
elif isinstance(infile, Mapping) :
file_input_data = infile
# Add to overall container
for k, v in file_input_data.items() :
append_arrays_dict(k, v, input_data)
#
# Parse metadata from file
#
if required_metadata is not None :
# Events and EventsPi objects have attr `metadata`
file_metadata = getattr(file_input_data, 'metadata', None)
# HDF files have attr `attrs` attached, if present (see pisa.utils.hdf)
if not file_metadata:
file_metadata = getattr(file_input_data, 'attrs', None)
if file_metadata:
# Check format
if not isinstance(file_metadata, Mapping):
raise TypeError(
"metadata or attrs expected to be a Mapping, but got {}".format(
type(file_metadata)
)
)
# Loop over expected metadata
for k in required_metadata :
assert k in file_metadata, "Expected metadata '%s' not found" % k
                # For the special case of livetime, append livetime from each file
# Otherwise, expect identical value in all cases
if k in self.metadata :
if k == "livetime" :
self.metadata[k] += file_metadata[k]
else :
assert self.metadata[k] == file_metadata[k]
else :
self.metadata[k] = file_metadata[k]
#
# Re-format inputs
#
# The following is intended to re-format input data into the desired
        # format. This is required to handle various input cases and to ensure
# backwards compatibility with older input file formats.
# Convert to the required event keys, e.g. "numu_cc", "nutaubar_nc", etc.
if self.neutrinos:
input_data = split_nu_events_by_flavor_and_interaction(input_data)
# The value for each category should itself be a dict of the event
# variables, where each entry is has a variable name as the key and an
# np.array filled once per event as the value.
#
# For backwards compatibility, convert to this format from known older
# formats first
if self.neutrinos:
for key, cat_dict in input_data.items():
if not isinstance(cat_dict, Mapping):
raise Exception(
"'%s' input data is not a mapping, unknown format (%s)"
% (key, type(cat_dict))
)
for var_key, var_data in cat_dict.items():
if not isinstance(var_data, np.ndarray):
raise Exception(
"'%s/%s' input data is not a numpy array, unknown"
" format (%s)" % (key, var_key, type(var_data))
)
# Ensure backwards compatibility with the old style "oppo" flux
# variables
if self.neutrinos:
fix_oppo_flux(input_data)
#
# Load the event data
#
# Should be organised under a single layer of keys, each representing
# some category of input data
# Loop over the input types
for data_key in input_data.keys():
if data_key in self:
raise ValueError(
"Key '%s' has already been added to this data structure"
)
self[data_key] = OrderedDict()
# Loop through variable mapping
# If none provided, just use all variables and keep the input names
if variable_mapping is None:
variable_mapping_to_use = tuple(
zip(input_data[data_key].keys(), input_data[data_key].keys())
)
else:
variable_mapping_to_use = variable_mapping.items()
# Init stuff for down-sampling later
chosen_event_indices = None
rand = np.random.RandomState(seed) # Enforce same sample each time
# Get the array data (stacking if multiple input variables defined)
# and check the variable exists in the input data
for var_dst, var_src in variable_mapping_to_use:
# TODO What about non-float data? Use dtype...
# Prepare for the stacking
array_data = None
if isinstance(var_src, str):
var_src = [var_src]
# Perform the stacking
array_data_to_stack = []
for var in var_src:
if var in input_data[data_key]:
array_data_to_stack.append(
input_data[data_key][var].astype(FTYPE)
)
else:
raise KeyError(
"Variable '%s' cannot be found for '%s' events"
% (var, data_key)
)
# Note `squeeze` removes the extraneous 2nd dim in case of a
# single `src`
array_data = np.squeeze(np.stack(array_data_to_stack, axis=1))
# Check actually have some data
if array_data is None:
raise ValueError(
"Cannot find source variable(s) '%s' for '%s'"
% (var_src, data_key)
)
#
# Event down sampling
#
# Only if requested by user
if self.fraction_events_to_keep is not None:
# Define events to keep only once for each species (e.g. same choice for all variables for a given species)
if chosen_event_indices is None :
                    # Get initial conditions
initial_num_events = array_data.size
desired_num_events = int( self.fraction_events_to_keep * float(initial_num_events) )
# Start with all events as input
current_event_indices = np.array( range(initial_num_events) )
# Loop over subsamples (will break out once reach desired subsample)
i = 0
while True :
# Get indices for the events to keep for this current sub-sample
assert current_event_indices.size >= desired_num_events, "Not enough events available" # Earlier checks on `fraction_events_to_keep` and `events_subsample_index` should prevent this error ever happening
chosen_event_indices = np.sort( rand.choice(current_event_indices, replace=False, size=desired_num_events) )
# If this is the requested sub-sample, done here
if i == self.events_subsample_index :
break
# Otherwise have not yet reached our subsample.
# Choose the remaining events as the new input events in the algorithm,
# and on the next iteration of this loop these remaining events will be
# used for extracting the new sub-sample.
# This will result in statistically independent sub-samples
remaining_event_indices = np.sort( np.setxor1d(current_event_indices, chosen_event_indices) )
current_event_indices = remaining_event_indices
i += 1
# Report
logging.info("Down-sampled %s events : %i -> %i (%0.2g%%)" % ( data_key, array_data.size, chosen_event_indices.size, 100.*(chosen_event_indices.size/array_data.size) ))
# Extract just the requested events
array_data = array_data[chosen_event_indices]
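                # Illustration of the disjoint sub-sampling above (hypothetical
                # numbers): with 10 events, fraction_events_to_keep=0.3 and
                # events_subsample_index=1:
                #   pass 0 draws 3 of [0..9]        -> e.g. {1, 4, 7}
                #   remaining after setxor1d        -> [0, 2, 3, 5, 6, 8, 9]
                #   pass 1 draws 3 of the remaining -> e.g. {2, 6, 9}
                # so successive sub-samples never share events.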
# Add to array
self[data_key][var_dst] = array_data
def apply_cut(self, keep_criteria):
"""Apply a cut by specifying criteria for keeping events. The cut must
be successfully applied to all flav/ints in the events object before
the changes are kept, otherwise the cuts are reverted.
Parameters
----------
keep_criteria : string
Any string interpretable as numpy boolean expression.
Examples
--------
Keep events with true energies in [1, 80] GeV (note that units are not
recognized, so have to be handled outside this method)
>>> events = events.apply_cut("(true_energy >= 1) & (true_energy <= 80)")
Do the opposite with "~" inverting the criteria
>>> events = events.apply_cut("~((true_energy >= 1) & (true_energy <= 80))")
Numpy namespace is available for use via `np` prefix
>>> events = events.apply_cut("np.log10(true_energy) >= 0")
"""
assert isinstance(keep_criteria, str)
# Check if have already applied these cuts
if keep_criteria in self.metadata["cuts"]:
logging.debug(
"Criteria '%s' have already been applied. Returning"
" events unmodified.",
keep_criteria,
)
return self
# TODO Get everything from the GPU first ?
# Prepare the post-cut data container
cut_data = EventsPi(name=self.name)
cut_data.metadata = copy.deepcopy(self.metadata)
# Loop over the data containers
for key in self.keys():
cut_data[key] = {}
# TODO Need to think about how to handle array, scalar and binned data
# TODO Check for `events` data mode, or should this kind of logic
# already be in the Container class?
variables = self[key].keys()
# Create the cut expression, and get the resulting mask
crit_str = keep_criteria
for variable_name in variables:
crit_str = crit_str.replace(
variable_name, 'self["%s"]["%s"]' % (key, variable_name)
)
mask = eval(crit_str) # pylint: disable=eval-used
# Fill a new container with the post-cut data
for variable_name in variables:
cut_data[key][variable_name] = copy.deepcopy(
self[key][variable_name][mask]
)
# TODO update to GPUs?
# Record the cuts
cut_data.metadata["cuts"].append(keep_criteria)
return cut_data
def keep_inbounds(self, binning):
"""Cut out any events that fall outside `binning`. Note that events
that fall exactly on an outer edge are kept.
Parameters
----------
binning : OneDimBinning or MultiDimBinning
Returns
-------
cut_data : EventsPi
"""
# Get the binning instance
try:
binning = OneDimBinning(binning)
except: # pylint: disable=bare-except
pass
if isinstance(binning, OneDimBinning):
binning = [binning]
binning = MultiDimBinning(binning)
# Define a cut to remove events outside of the binned region
bin_edge_cuts = [dim.inbounds_criteria for dim in binning]
bin_edge_cuts = " & ".join([str(x) for x in bin_edge_cuts])
# Apply the cut
return self.apply_cut(bin_edge_cuts)
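    # Example usage sketch (hypothetical binning; units handling omitted):
    #     ebins = OneDimBinning(name="true_energy", domain=[1, 80],
    #                           num_bins=40, is_log=True)
    #     events = events.keep_inbounds(ebins)  # drops events outside [1, 80]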
def __str__(self): # TODO Handle non-array data cases
string = "-----------------------------\n"
string += "EventsPi container %s :" % self.name
for key, container in self.items():
string += " %s :\n" % key
for var, array in container.items():
array_data = array
if len(array_data) <= 4:
array_data_string = str(array_data)
else:
array_data_string = "[%s, %s, ..., %s, %s]" % (
array_data[0],
array_data[1],
array_data[-2],
array_data[-1],
)
string += " %s : %i elements : %s\n" % (
var,
len(array_data),
array_data_string,
)
string += "-----------------------------"
return string
def split_nu_events_by_flavor_and_interaction(input_data):
"""Split neutrino events by nu vs nubar, and CC vs NC.
Should be compatible with DRAGON and GRECO samples, but this depends on the
contents of the original I3 files and whatever conversion script was used
to produce the HDF5 files from these.
Parameters
----------
input_data : mapping
Returns
-------
output_data : OrderedDict
"""
# TODO Split into one function for nu/nubar and one for CC/NC?
assert isinstance(input_data, Mapping)
assert input_data, "`input_data` has no members"
output_data = OrderedDict()
# Loop through subcategories in the input data
for key, data in input_data.items():
# If key already is one of the desired keys, nothing new to do
# Just move the data to the output container
if key in OUTPUT_NUFLAVINT_KEYS:
if key in output_data:
                output_data[key] = np.concatenate([output_data[key], data])
else:
output_data[key] = data
continue
# Legacy PISA HDF5 files are structured as
# {"<flavor>": {"<int_type>": data}};
# and `flavor` can have "_" separating "bar". Remove such underscores
# and flatten the nested dicts into
# {"<flavor>_<int_type>": data}
# format
if key in LEGACY_FLAVKEY_XLATION:
new_flav_key = LEGACY_FLAVKEY_XLATION[key]
for sub_key, sub_data in data.items():
assert sub_key in ("cc", "nc"), str(sub_key)
output_key = new_flav_key + "_" + sub_key
if output_key in output_data:
                    output_data[output_key] = np.concatenate(
                        [output_data[output_key], sub_data]
                    )
else:
output_data[output_key] = sub_data
continue
assert "pdg_code" in data, "No 'pdg_code' variable found for %s data" % key
# Check these are neutrino events
assert np.all(np.isin(data["pdg_code"], NU_FLAVORS.values())), (
"%s data does not appear to be a neutrino data" % key
)
assert "interaction" in data, (
"No 'interaction' variable found for %s data" % key
)
# Define a mask to select the events for each desired output key
key_mask_pairs = [
("%s_%s" % (fk, ik), (data["pdg_code"] == fc) & (data["interaction"] == ic))
for fk, fc in NU_FLAVORS.items()
for ik, ic in NU_INTERACTIONS.items()
]
# Loop over the keys/masks and write the data for each class to the
# output container
for mkey, mask in key_mask_pairs:
        if np.any(mask):
#!/usr/bin/env python
import argparse
import os
import sys
import csv
import h5py
import tensorflow.keras as keras
import tensorflow as tf
import numpy as np
from tqdm import tqdm
import cv2
import SimpleITK as sitk
import time
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
tf.compat.v1.enable_v2_behavior()
if __name__ == "__main__" and __package__ is None:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
import fl_covid.bin # noqa: F401
__package__ = "fl_covid.bin"
# Change these to absolute imports if you copy this script outside the fl_covid package.
from ..utils.anchors import compute_overlap
from .. import models
from ..preprocessing.csv_generator import CSVGenerator
from ..utils.eval import _compute_ap, _get_annotations, _get_annotations_and_img_path
from ..utils.config import read_config_file, parse_anchor_parameters
from ..utils.keras_version import check_keras_version
from ..utils.visualization import draw_detections, draw_annotations
from ..utils.visualization import draw_box, label_color, draw_caption
from ..utils.image import preprocess_image, resize_image
from fl_covid.bin.train_fed import create_models
from fl_covid.bin.evaluate_overall import fp_reduce
def get_session():
""" Construct a modified tf session.
"""
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
return tf.Session(config=config)
def draw_label_hit(image, box, caption):
""" Draws a caption above the box in an image.
# Arguments
image : The image to draw on.
box : A list of 4 elements (x1, y1, x2, y2).
caption : String containing the text to draw.
"""
b = np.array(box).astype(int)
cv2.putText(image, caption, (b[0]+5, b[3] - 5), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 2)
cv2.putText(image, caption, (b[0]+5, b[3] - 5), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
def draw_detections(image, boxes, scores, labels, color=None, label_to_name=None, slice_id=None, bbox_writer=None, score_threshold=0.4): # score_threshold used to be 0.5
""" Draws detections in an image.
# Arguments
image : The image to draw on.
boxes : A [N, 4] matrix (x1, y1, x2, y2).
scores : A list of N classification scores.
labels : A list of N labels.
color : The color of the boxes. By default the color from keras_retinanet.utils.colors.label_color will be used.
label_to_name : (optional) Functor for mapping a label to a name.
score_threshold : Threshold used for determining what detections to draw.
"""
selection = np.where(scores > score_threshold)[0]
for i in selection:
c = color if color is not None else label_color(labels[i])
if bbox_writer is not None and slice_id is not None:
tar_path = 'slice_{}.png'.format(slice_id)
b = np.array(boxes[i, :]).astype(int)
bbox_writer.writerow([tar_path]+ [b[0],b[1],b[2],b[3]]+['lesion'])
draw_box(image, boxes[i, :], color=c,thickness=1)
# draw labels
caption = (label_to_name(labels[i]) if label_to_name else str(labels[i])) + ': {0:.2f}'.format(scores[i])
draw_caption(image, boxes[i, :], caption)
def read_h5(img_path):
with h5py.File(img_path, "r") as hf:
arr = hf['arr'][:]
return arr
def draw_colorful_result(
args,
client_name,
patient_name,
iou_threshold=0.5,
score_threshold=0.05,
max_detections=100,
save_path=None
):
def _parse(value, function, fmt):
"""Parse a string into a value, and format a nice ValueError if it fails.
Returns `function(value)`.
Any `ValueError` raised is catched and a new `ValueError` is raised
with message `fmt.format(e)`, where `e` is the caught `ValueError`.
"""
try:
return function(value)
except ValueError as e:
raise ValueError(fmt.format(e))
if args.reduce_fp:
sign = 'fp_reduced_'
else:
sign=''
bbox_result_path = os.path.join(save_path,'{}_{}_score_thres_{}_bbox.csv'.format(client_name, patient_name, score_threshold))
anno_result_path = os.path.join(save_path,'{}_{}_score_thres_{}_anno.csv'.format(client_name, patient_name, score_threshold))
all_annotations_img_path = np.load(os.path.join(save_path, '{}_{}_annotations_img_path.npy'.format(client_name, patient_name)), allow_pickle=True)
# prepare annotation result
anno_result = {}
annos = open(anno_result_path, 'r')
classes = {'lesion': 0}
for line, row in enumerate(annos):
splits = row.split(',')
# print(splits)
# print(len(splits))
try:
img_file, x1, y1, x2, y2, class_name, hit_cnt = splits
hit_cnt = hit_cnt.replace('\n', '')
except ValueError:
raise ValueError(
'line {}: format should be \'img_file,x1,y1,x2,y2,class_name\' or \'img_file,,,,,\''.format(line))
if img_file not in anno_result:
anno_result[img_file] = []
x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
hit_cnt = _parse(hit_cnt, int, 'line {}: malformed hit count: {{}}'.format(line))
if x2 <= x1:
raise ValueError('line {}: x2 ({}) must be higher than x1 ({})'.format(line, x2, x1))
if y2 <= y1:
raise ValueError('line {}: y2 ({}) must be higher than y1 ({})'.format(line, y2, y1))
# check if the current class name is correctly present
if str(class_name) not in classes:
raise ValueError(
'line {}: unknown class name: \'{}\' (classes: {})'.format(line, class_name, classes))
anno_result[img_file].append({'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': class_name, 'hit_cnt':hit_cnt})
# prepare prediction bbox result
bbox_result = {}
bboxs = open(bbox_result_path, 'r')
classes = {'lesion': 0}
for line, row in enumerate(bboxs):
splits = row.split(',')
try:
img_file, x1, y1, x2, y2, class_name, score, box_type = splits
box_type = box_type.replace('\n', '')
except ValueError:
raise ValueError(
'line {}: format should be \'img_file,x1,y1,x2,y2,class_name\' or \'img_file,,,,,\''.format(line))
if img_file not in bbox_result:
bbox_result[img_file] = []
x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
if x2 <= x1:
raise ValueError('line {}: x2 ({}) must be higher than x1 ({})'.format(line, x2, x1))
if y2 <= y1:
raise ValueError('line {}: y2 ({}) must be higher than y1 ({})'.format(line, y2, y1))
# check if the current class name is correctly present
if str(class_name) not in classes:
raise ValueError(
'line {}: unknown class name: \'{}\' (classes: {})'.format(line, class_name, classes))
bbox_result[img_file].append({'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': class_name, 'score':score, 'box_type': str(box_type)})
detection_out = np.zeros([len(all_annotations_img_path), 512, 512, 3])
for i in tqdm(range(len(all_annotations_img_path)), desc='Drawing colorful {} result on {} {}: '.format(sign, client_name, patient_name)):
img_path = all_annotations_img_path[i]
raw_img = read_h5(img_path)
# print(img_path)
image = raw_img.copy()
if keras.backend.image_data_format() == 'channels_first':
image = image.transpose((2, 0, 1))
if img_path in anno_result:
for anno_index in range(len(anno_result[img_path])):
# draw annotation
hit_cnt = anno_result[img_path][anno_index]['hit_cnt']
caption = '{}'.format(hit_cnt)
anno_box = [anno_result[img_path][anno_index]['x1'], anno_result[img_path][anno_index]['y1'], anno_result[img_path][anno_index]['x2'],anno_result[img_path][anno_index]['y2']]
draw_label_hit(image, anno_box , caption)
draw_box(image, anno_box, color=[0,255,0], thickness=1)
if img_path in bbox_result:
for bbox_index in range(len(bbox_result[img_path])):
pred_box = [bbox_result[img_path][bbox_index]['x1'], bbox_result[img_path][bbox_index]['y1'], bbox_result[img_path][bbox_index]['x2'],bbox_result[img_path][bbox_index]['y2']]
box_type = str(bbox_result[img_path][bbox_index]['box_type'])
score = float(bbox_result[img_path][bbox_index]['score'])
# print(box_type)
# print('assigned_gt')
# print(box_type=='assigned_gt')
if box_type == 'max_overlap':
box_color = [31, 0, 255]
elif box_type == 'assigned_pre':
box_color =[184, 0, 255]
elif box_type == 'assigned_gt':
box_color = [139, 69, 19]
elif box_type == 'fp':
box_color = [225, 0, 0]
else:
raise ValueError("Unknown box type :{}".format(box_type))
draw_box(image, pred_box, color=box_color, thickness=1)
caption = ('{0:.2f}'.format(score))
draw_caption(image, pred_box, caption)
detection_out[i, :, :] = image
print('Writing colorful results on {} {}...'.format(client_name, patient_name))
detection_out = sitk.GetImageFromArray(detection_out)
sitk.WriteImage(detection_out, os.path.join(save_path, '{}_{}_colorful_detection_{}result.nii.gz'.format(client_name, patient_name, sign)))
def create_generator(args):
""" Create generators for evaluation.
"""
if args.dataset_type == 'csv':
validation_generator = CSVGenerator(
args.annotations,
args.classes,
image_min_side=args.image_min_side,
image_max_side=args.image_max_side,
config=args.config,
shuffle_groups=False
)
else:
raise ValueError('Invalid data type received: {}'.format(args.dataset_type))
return validation_generator
def _seg_filter(bboxes,scores_sort,seg):
image_boxes = bboxes
    inner = np.asarray([], dtype=bool)
flag = False
for i in range(image_boxes.shape[0]):
x1 = int(image_boxes[i][0])
y1 = int(image_boxes[i][1])
x2 = int(image_boxes[i][2])
y2 = int(image_boxes[i][3])
x1 = 511 if x1 > 511 else x1
y1 = 511 if y1 > 511 else y1
x2 = 511 if x2 > 511 else x2
y2 = 511 if y2 > 511 else y2
# print(scores_sort)
# print(scores_sort.shape)
if (seg[y1,x1,:] == 0).all() and (seg[y2,x2,:] == 0).all() and (seg[y1,x2,:] == 0).all() and (seg[y2,x1,:] == 0).all():
inner = np.append(inner,False)
flag=True
# scores_sort = np.delete(scores_sort,i,axis=0)
else:
            inner = np.append(inner, True)
import numpy as np
import matplotlib.pyplot as plt
import glob
import json
configs = ['kp200_startVel_special']
def running_mean(x, N):
cumsum = np.cumsum(np.insert(x, 0, 0))
return (cumsum[N:] - cumsum[:-N]) / float(N)
labels = {}
labels["NoIntent"] = "CarEnv-TenOpponent-States-SpeedControl-TL-v0_"
labels['Intent'] = "CarEnv-TenOpponentWithIntention-States-SpeedControl-TL-v0_"
for config in configs:
print(glob.glob("data/"+labels["NoIntent"]+config+"*.npy"))
# print(glob.glob("data/"+labels["PredIntent"]+config+"*.npy"))
print(glob.glob("data/" + labels["Intent"]+config+"*.npy"))
for key, val in labels.items():
for config in configs:
fnames = glob.glob("data/" + val + config + "*.npy")
data = []
for fname in fnames:
data.append(np.load(fname)[:13392])
        data = np.array(data)
#!/usr/bin/env python
"""
Perform the Mann-Whitney U test, the Kolmogorov-Smirnov test, and the Student's
t-test for the following ensembles:
- GPU double precision (reference & control)
- CPU double precision
- GPU single precision
- GPU double precision with additional explicit diffusion
Make sure to compile the cpp files for the Mann-Whitney U test and the
Kolmogorov-Smirnov test first before running this script (see mannwhitneyu.cpp
and kolmogorov_smirnov.cpp).
Copyright (c) 2021 ETH Zurich, <NAME>
MIT License
"""
import numpy as np
import xarray as xr
import pickle
import mannwhitneyu as mwu
import kolmogorov_smirnov as ks
rpert = 'e4' # prefix
n_runs = 50 # total number of runs
n_sel = 100 # how many times we randomly select runs
alpha = 0.05 # significance level
nm = 20 # members per ensemble
u_crit = 127 # nm = 20
t_crit = 2.024 # nm = 20
replace = False # to bootstrap or not to bootstrap
nbins = 100 # Kolmogorov-Smirnov
# Some arrays to make life easier
tests = ['mwu', 'ks', 't']
comparisons = ['c', 'cpu', 'sp', 'diff']
# Variable
variables = ['t_850hPa', 'fi_500hPa', 'u_10m', 't_2m', 'precip', 'asob_t',
'athb_t', 'ps']
path_gpu = '../data/10d_gpu_cpu_sp_diff/gpu_dycore/'
path_cpu = '../data/10d_gpu_cpu_sp_diff/cpu_nodycore/'
path_gpu_sp = '../data/10d_gpu_cpu_sp_diff/gpu_dycore_sp/'
path_gpu_diff = '../data/10d_gpu_cpu_sp_diff/gpu_dycore_diff/'
# Final rejection rates
rej_rates = {}
for comp in comparisons:
rej_rates[comp] = {}
for vname in variables:
rej_rates[comp][vname] = {}
runs_r = {}
runs_c = {}
runs_cpu = {}
runs_sp = {}
runs_diff = {}
# Load data for gpu (reference and control) and cpu
for i in range(n_runs):
i_str_r = str(i).zfill(4)
i_str_c = str(i+n_runs).zfill(4)
fname_r = path_gpu + rpert + '_' + i_str_r + '.nc'
fname_c = path_gpu + rpert + '_' + i_str_c + '.nc'
fname_cpu = path_cpu + rpert + '_' + i_str_r + '.nc'
fname_sp = path_gpu_sp + rpert + '_' + i_str_r + '.nc'
fname_diff = path_gpu_diff + rpert + '_' + i_str_r + '.nc'
runs_r[i] = {}
runs_c[i] = {}
runs_cpu[i] = {}
runs_sp[i] = {}
runs_diff[i] = {}
runs_r[i]['dset'] = xr.open_dataset(fname_r)
runs_c[i]['dset'] = xr.open_dataset(fname_c)
runs_cpu[i]['dset'] = xr.open_dataset(fname_cpu)
runs_sp[i]['dset'] = xr.open_dataset(fname_sp)
runs_diff[i]['dset'] = xr.open_dataset(fname_diff)
# Test for each variable
for vname in variables:
print("----------------------------")
print("Working on " + vname + " ...")
print("----------------------------")
# initialize arrays
nt, ny, nx = runs_r[0]['dset'][vname].shape
values_r = np.zeros((nt, ny, nx, nm))
values_c = np.zeros((nt, ny, nx, nm))
values_cpu = np.zeros((nt, ny, nx, nm))
values_sp = np.zeros((nt, ny, nx, nm))
values_diff = np.zeros((nt, ny, nx, nm))
# For the results
results = {}
for test in tests:
results[test] = {}
for comp in comparisons:
results[test][comp] = np.zeros((n_sel, nt))
# Do test multiple times with random selection of ensemble members
for s in range(n_sel):
if ((s+1) % 10 == 0):
print(str(s+1) + " / " + str(n_sel))
# Pick random samples for comparison
idxs_r = np.random.choice(np.arange(n_runs), nm, replace=replace)
idxs_c = np.random.choice(np.arange(n_runs), nm, replace=replace)
idxs_cpu = np.random.choice(np.arange(n_runs), nm, replace=replace)
idxs_sp = np.random.choice(np.arange(n_runs), nm, replace=replace)
idxs_diff = np.random.choice(np.arange(n_runs), nm, replace=replace)
# ============================================================
# Mann-Whitney U test
# ============================================================
test = 'mwu'
# Put together arrays
for i in range(nm):
values_r[:,:,:,i] = runs_r[idxs_r[i]]['dset'][vname].values
values_c[:,:,:,i] = runs_c[idxs_c[i]]['dset'][vname].values
values_cpu[:,:,:,i] = runs_cpu[idxs_cpu[i]]['dset'][vname].values
values_sp[:,:,:,i] = runs_sp[idxs_sp[i]]['dset'][vname].values
values_diff[:,:,:,i] = runs_diff[idxs_diff[i]]['dset'][vname].values
# Call test
reject_c = mwu.mwu(values_r, values_c, u_crit)
reject_cpu = mwu.mwu(values_r, values_cpu, u_crit)
reject_sp = mwu.mwu(values_r, values_sp, u_crit)
reject_diff = mwu.mwu(values_r, values_diff, u_crit)
results[test]['c'][s] = np.mean(reject_c, axis=(1,2))
results[test]['cpu'][s] = np.mean(reject_cpu, axis=(1,2))
results[test]['sp'][s] = np.mean(reject_sp, axis=(1,2))
results[test]['diff'][s] = np.mean(reject_diff, axis=(1,2))
# ============================================================
# Kolmogorov-Smirnov test
# ============================================================
test = 'ks'
# Call test
reject_c = ks.ks(values_r, values_c, nbins)
reject_cpu = ks.ks(values_r, values_cpu, nbins)
reject_sp = ks.ks(values_r, values_sp, nbins)
reject_diff = ks.ks(values_r, values_diff, nbins)
results[test]['c'][s] = np.mean(reject_c, axis=(1,2))
results[test]['cpu'][s] = np.mean(reject_cpu, axis=(1,2))
results[test]['sp'][s] = np.mean(reject_sp, axis=(1,2))
results[test]['diff'][s] = np.mean(reject_diff, axis=(1,2))
# ============================================================
# Student's t-test
# ============================================================
test = 't'
# Means
mean_r = np.mean(values_r, axis=-1)
mean_c = np.mean(values_c, axis=-1)
mean_cpu = np.mean(values_cpu, axis=-1)
mean_sp = np.mean(values_sp, axis=-1)
        mean_diff = np.mean(values_diff, axis=-1)
"""
data_utils.py
Collection of functions for dealing with data for plotting.
Author: <NAME>
Date: 01/27/2020
"""
from __future__ import absolute_import, division, print_function
import os
import arviz as az
import numpy as np
import tensorflow as tf
import pandas as pd
import xarray as xr
from dataclasses import dataclass
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from typing import Dict
from pathlib import Path
import utils.file_io as io
from utils.file_io import timeit
from lattice.utils import u1_plaq_exact
from utils.attr_dict import AttrDict
# from plotters.plot_utils import get_matching_log_dirs
# from plotters.plot_observables import get_obs_dict, grid_plot
mpl.style.use('fast')
sns.set_palette('bright')
TLS_DEFAULT = mpl.rcParams['xtick.labelsize']
@dataclass
class RunParams:
hmc: bool
run_dir: str
eps: float
beta: float
run_steps: int
plaq_weight: float
charge_weight: float
num_steps: int
x_shape: tuple
input_shape: tuple
def __post__init__(self):
self.traj_len = self.num_steps * self.eps
@dataclass
class ChargeData:
q: tf.Tensor
dq: tf.Tensor
params: RunParams
# pylint:disable=invalid-name
def filter_dict(d, cond, key=None):
if key is not None:
val = d[key]
if isinstance(val, dict):
return {
k: v for k, v in val.items() if cond
}
raise ValueError('If passing a key, d[key] must be a dict.')
return {
k: v for k, v in d.items() if cond
}
def _look(p, s, conds=None):
print(f'Looking in {p}...')
matches = [x for x in Path(p).rglob(f'*{s}*')]
if conds is not None:
if isinstance(conds, (list, tuple)):
for cond in conds:
matches = [x for x in matches if cond(x)]
else:
matches = [x for x in matches if cond(x)]
return matches
def _get_dirs(paths, hmc=False):
def _look(p, s, conds=None):
print(f'Looking in {p}...')
matches = [x for x in Path(p).rglob(f'*{s}*')]
if conds is not None:
if isinstance(conds, (list, tuple)):
for cond in conds:
matches = [x for x in matches if cond(x)]
else:
matches = [x for x in matches if cond(x)]
return matches
dirs = []
if hmc:
search_str = 'HMC_L16_b'
conds = (
lambda x: 'hmc_logs' in str(x),
lambda x: 'hmc' in str(x).lower()
)
else:
search_str = 'L16_b2048'
conds = (
lambda x: 'GaugeModel_logs' in (str(x)),
lambda x: 'HMC_' not in str(x),
lambda x: Path(x).is_dir(),
)
if isinstance(paths, (list, tuple)):
for path in paths:
dirs += _look(path, search_str, conds)
else:
dirs = _look(paths, search_str, conds)
return dirs
def load_from_dir(d, fnames=None):
if fnames is None:
fnames = {
'dq': 'dq.z',
'charges': 'charges.z',
'run_params': 'run_params.z'
}
darr = [x for x in Path(d).iterdir() if x.is_dir()]
for rd in darr:
files = {k: sorted(rd.glob(f'*{v}*')) for k, v in fnames.items()}
data = {k: io.loadz(v) for k, v in files.items()}
return data
def load_charge_data(dirs, hmc=False):
data = {}
for d in dirs:
print(f'Looking in dir: {d}...')
if 'inference_hmc' in str(d):
print(f'Skipping {str(d)}...')
continue
dqfile = sorted(d.rglob('dq.z'))
qfile = sorted(d.rglob('charges.z'))
rpfile = sorted(d.rglob('run_params.z'))
num_runs = len(dqfile)
if num_runs > 0:
for dqf, qf, rpf in zip(dqfile, qfile, rpfile):
params = io.loadz(rpf)
                if 'xeps' in params.keys() and 'veps' in params.keys():
xeps = np.array([i.numpy() for i in params['xeps']])
veps = np.array([i.numpy() for i in params['veps']])
eps = (np.mean(xeps) + np.mean(veps)) / 2.
elif 'eps' in params.keys():
eps = params['eps']
params['eps'] = eps
params = RunParams(**params)
qarr = io.loadz(qf)
dqarr = io.loadz(dqf)
print(
'...loading data for (beta, num_steps, eps): '
f'({params.beta}, {params.num_steps}, {params.eps:.3g})'
)
charge_data = ChargeData(q=qarr, dq=dqarr, params=params)
try:
data[params.beta].update({params.traj_len: charge_data})
except KeyError:
data[params.beta] = {params.traj_len: charge_data}
# def _update_dict(beta, z, qdata):
# try:
# z[beta].update({params.traj_len: qdata})
# except KeyError:
# z[beta] = {params.traj_len: qdata}
#
# return z
#
# data = _update_dict(params.beta, data, charge_data)
return data
def calc_tau_int(data, therm_frac=0.2):
"""Calculate the integrated autocorrelation time."""
tau_int = {}
for key, val in data.items():
tau_int[key] = {}
for k, v in val.items():
arr, _ = therm_arr(v, therm_frac=therm_frac)
arr = arr.T
pass
# reference:
# https://github.com/rougier/numpy-100/blob/master/100_Numpy_exercises_with_solutions.md
# ----------
# Problem:
# 100. Compute bootstrapped 95% confidence intervals for the mean of a 1D
# array X (i.e., resample the elements of an array with replacement N times,
# compute the mean of each sample, and then compute percentiles over the
# means).
def bootstrapped_confidence_interval(x: np.ndarray, N: int = 1000):
idx = np.random.randint(0, x.size, (N, x.size))
means = x[idx].mean(axis=1)
confint = np.percentile(means, [2.5, 97.5])
return confint
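# Usage sketch (hypothetical data): bracket the mean of 500 N(0, 1) draws.
#     x = np.random.randn(500)
#     lo, hi = bootstrapped_confidence_interval(x)  # e.g. ~(-0.09, 0.09)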
# Reference: https://dfm.io/posts/autocorr/
def next_pow_two(n):
i = 1
while i < n:
i = i << 1
return i
def autocorr_func_1d(x, norm=True):
"""Compute the autocorrelation function of a 1D chain."""
x = np.atleast_1d(x)
if len(x.shape) != 1:
raise ValueError('Invalid dimensions for 1D autocorrelation function.')
n = next_pow_two(len(x))
# Compute the FFT and then (from that) the auto-correlation function
f = np.fft.fft(x - np.mean(x), n=2*n)
acf = np.fft.ifft(f * np.conjugate(f))[:len(x)].real
acf /= 4 * n
# Optionally normalize
if norm:
acf /= acf[0]
return acf
def auto_window(taus, c):
"""Automated windowing procedure following Sokal (1989)."""
m = np.arange(len(taus)) < c * taus
if np.any(m):
return np.argmin(m)
return len(taus) - 1
def autocorr_gw2010(y, c=5.0):
"""Following the suggestion from Goodman & Weare (2010)."""
f = autocorr_func_1d(np.mean(y, axis=0))
taus = 2.0 * np.cumsum(f) - 1.0
window = auto_window(taus, c)
return taus[window]
def autocorr_new(y, c=5.0):
"""New implementation of autocorrelation function."""
f = np.zeros(y.shape[1])
for yy in y:
f += autocorr_func_1d(yy)
f /= len(y)
taus = 2.0 * np.cumsum(f) - 1.0
window = auto_window(taus, c)
return taus[window]
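# Usage sketch (hypothetical chains): `y` holds parallel chains of one
# observable, shaped [n_chains, n_steps].
#     tau = autocorr_new(chains)  # e.g. chains.shape == (32, 10000)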
def calc_autocorr(y):
N = np.exp(np.linspace(np.log(100), np.log(y.shape[1]), 20)).astype(int)
new = np.empty(len(N))
for i, n in enumerate(N):
new[i] = autocorr_new(y[:, :n])
return N, new
def flatten_dict(d):
"""Recursively convert all entries of `d` to be `AttrDict`."""
if not isinstance(d, AttrDict):
d = AttrDict(**d)
for key, val in d.items():
if isinstance(val, dict):
if not isinstance(val, AttrDict):
d[key] = flatten_dict(val)
else:
d[key] = AttrDict(**val)
return d
def _load_inference_data(log_dir, fnames, inference_str='inference'):
"""Helper function for loading inference data from `log_dir`."""
run_dir = os.path.join(log_dir, inference_str)
if os.path.isdir(run_dir):
data_dir = os.path.join(run_dir, 'run_data')
rp_file = os.path.join(run_dir, 'run_params.z')
if os.path.isfile(rp_file) and os.path.isdir(data_dir):
run_params = io.loadz(rp_file)
key = (run_params['beta'],
run_params['eps'],
run_params['num_steps'])
data = [
io.loadz(os.path.join(data_dir, f'{fname}.z'))
for fname in fnames
]
return key, data
def load_inference_data(dirs, search_strs, inference_str='inference'):
data = {
s: {} for s in search_strs
}
for d in dirs:
print(f'Looking in dir: {d}...')
run_dir = Path(os.path.join(d, inference_str))
if run_dir.is_dir():
run_dirs = [x for x in run_dir.iterdir() if x.is_dir()]
for rd in run_dirs:
print(f'...looking in run_dir: {rd}...')
rp_file = os.path.join(str(rd), 'run_params.z')
if os.path.isfile(rp_file):
params = io.loadz(rp_file)
beta = params['beta']
eps = params['eps']
num_steps = params['num_steps']
data_dir = os.path.join(str(rd), 'run_data')
if os.path.isdir(data_dir):
for search_str in search_strs:
dfile = os.path.join(data_dir, f'{search_str}.z')
if os.path.isfile(dfile):
_data = io.loadz(dfile)
try:
data[search_str].update({
(beta, num_steps, eps): _data
})
except KeyError:
data[search_str] = {
(beta, num_steps, eps): _data
}
return data
def _get_l2hmc_dirs(paths, search_str=None):
"""Look for `log_dirs` containing a training/inference run for L2HMC."""
if search_str is None:
search_str = '*L16_b*'
dirs = []
for path in paths:
if not isinstance(path, Path):
path = Path(os.path.abspath(path))
print(f'Looking in {path}...')
dirs += [
x for x in path.rglob(search_str)
if 'GaugeModel_logs' in str(x)
and 'HMC_' not in str(x)
and x.is_dir()
]
return dirs
def get_l2hmc_dirs():
bd_local = os.path.abspath(
'/Users/saforem2/thetaGPU/training'
)
bd_theta = os.path.abspath(
'/lus/theta-fs0/projects/DLHMC/thetaGPU/training'
)
l2hmc_dirs = []
if os.path.isdir(bd_local):
l2hmc_dirs += _get_l2hmc_dirs(bd_local)
if os.path.isdir(bd_theta):
l2hmc_dirs += _get_l2hmc_dirs(bd_theta)
return l2hmc_dirs
def get_hmc_dirs(base_dir=None):
if base_dir is None:
base_dir = os.path.abspath(
'/lus/theta-fs0/projects/DLHMC/thetaGPU/inference/'
)
if not os.path.isdir(base_dir):
base_dir = os.path.abspath(
'/Users/saforem2/thetaGPU/inference'
)
if not os.path.isdir(base_dir):
raise FileNotFoundError(f'Unable to locate {base_dir}')
base_dir = Path(base_dir)
    hmc_dirs = [x for x in base_dir.rglob('*HMC_L16*') if x.is_dir()]
return hmc_dirs
def bootstrap(x, reps=10000):
n = len(x)
xb = np.random.choice(x, (n, reps), replace=True)
yb = xb.mean(axis=0)
    lower, upper = np.percentile(yb, [2.5, 97.5])
return yb, (lower, upper)
def dq_stats(dq, reps=10000, therm_frac=0.2):
stats = {}
for key, val in dq.items():
for k, v in val.items():
data = therm_arr(v, therm_frac=therm_frac, ret_steps=False)
avgs = []
errs = []
for chain in data.T:
avg, (lower, upper) = bootstrap(chain, reps)
err = np.max([np.abs(avg - lower), np.abs(upper - avg)])
avgs.append(avg)
errs.append(err)
try:
stats[key].update({
k: {
'avg': np.mean(avgs),
'avg_std': np.std(avgs),
'err': np.mean(errs),
'err_std': np.std(errs),
'min': np.min(data),
'max': np.max(data),
}
})
except KeyError:
stats[key] = {
k: {
'avg': np.mean(avgs),
'avg_std': np.std(avgs),
'err': np.mean(errs),
'err_std': np.std(errs),
'min': np.min(data),
'max': np.max(data),
},
}
return stats
def autocorrelation_time(x, s, mu, var):
"""Compute the autocorrelation time."""
b, t, d = x.shape
act_ = np.zeros([d])
for i in range(b):
y = x[i] - mu
p, n = y[:-s], y[s:]
act_ += np.mean(p * n, axis=0) / var
act_ /= b
return act_
def effective_sample_size(x, mu, var):
# batch_size, time, dimension
b, t, d = x.shape
ess_ = np.ones([d])
for s in range(1, t):
p = autocorrelation_time(x, s, mu, var)
if np.sum(p > 0.05) == 0:
break
else:
for j in range(d):
if p[j] > 0.05:
ess_[j] += 2. * p[j] * (1. - float(s) / t)
return t / ess_
def batch_means_ess(x):
"""Estimate the ESS.
We estimate the ESS as the ratio of the variance of the batch means to the
variance of the chain, [ref](https://arxiv.org/pdf/1011.0175.pdf).
NOTE: `x` should be a chain with shape: [time_steps, num_chains, dim].
"""
x = np.transpose(x, [1, 0, 2])
T, M, D = x.shape
num_batches = int(np.floor(T ** (1 / 3)))
batch_size = int(np.floor(num_batches ** 2))
batch_means = []
for i in range(num_batches):
batch = x[batch_size * i:batch_size * i + batch_size]
batch_means.append(np.mean(batch, axis=0))
batch_variance = np.var(np.array(batch_means), axis=0)
chain_variance = np.var(x, axis=0)
act = batch_size * batch_variance / (chain_variance + 1e-20)
return 1. / act
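# Usage sketch (hypothetical chain): `samples` is shaped
# [time_steps, num_chains, dim], e.g. (5000, 16, 2):
#     ess = batch_means_ess(samples)
# Larger values indicate less autocorrelated (more effective) samples.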
def _calc_var_explained(x):
"""Calculate the % variance explained by the singular values of `x`."""
_, s, _ = np.linalg.svd(x, full_matrices=True)
return s ** 2 / np.sum(s ** 2)
def calc_var_explained(weights_dict):
"""Calculate the % variance explained by the sv's for each weight mtx."""
xweights = weights_dict['xnet']
vweights = weights_dict['vnet']
var_explained = {}
for ((xk, xv), (vk, vv)) in zip(xweights.items(), vweights.items()):
xk_ = f'xnet_{xk}'
vk_ = f'vnet_{vk}'
var_explained[xk_] = _calc_var_explained(xv)
var_explained[vk_] = _calc_var_explained(vv)
return var_explained
def bootstrap_old(data, n_boot=10000, ci=68):
"""Bootstrap resampling.
Returns:
mean (float): Mean of the (bootstrap) resampled data.
err (float): Standard deviation of the (bootstrap) resampled data.
data_rs (np.ndarray): Boostrap resampled data.
"""
if not isinstance(data, np.ndarray):
data = np.array(data)
step_axis = np.argmax(data.shape)
samples = []
for _ in range(int(n_boot)):
resampler = np.random.randint(0,
data.shape[step_axis],
data.shape[step_axis])
sample = data.take(resampler, axis=step_axis)
samples.append(np.mean(sample, axis=step_axis))
data_rs = np.array(samples)
mean = np.mean(data_rs)
err = np.std(data_rs)
return mean, err, data_rs
def calc_tunneling_rate(charges):
"""Calculate the tunneling rate as Q_{i+1} - Q_{i}."""
if not isinstance(charges, np.ndarray):
charges = np.array(charges)
charges = charges.T if charges.shape[0] > charges.shape[1] else charges
    charges = np.around(charges)
from os import path, mkdir, listdir
import numpy as np
np.random.seed(1)
import random
random.seed(1)
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
tf.set_random_seed(1)
import timeit
import cv2
from models import get_densenet121_unet_softmax
from tqdm import tqdm
import argparse, logging
test_folder = path.join('..', 'data_test')
models_folder = 'nn_models'
test_pred = path.join('..', 'predictions', 'densenet_test_pred_2')
all_ids = []
all_images = []
all_masks = []
def preprocess_inputs(x):
x = np.asarray(x, dtype='float32')
x /= 127.5
x -= 1.
return x
def bgr_to_lab(img):
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(17, 17))
lab = clahe.apply(lab[:, :, 0])
if lab.mean() > 127:
lab = 255 - lab
return lab[..., np.newaxis]
#def load_image(path):
# bf = bfio.BioReader(path)
# image = bf.read_image()
# image=image[:,:,0,:,0]
# if image.shape[2] == 3:
# return image
# elif image.shape[2]==1:
# return np.dstack((image[:,:,0], image[:,:,0],image[:,:,0]))
def factors(n):
# Modified from https://stackoverflow.com/a/6909532
f = [1,1]
for i in range(1,int(n**0.5)+1):
if n % i == 0:
f = [i,n//i]
return f
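# e.g. factors(12) -> [3, 4]; factors(7) -> [1, 7] (most square-like split)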
if __name__ == '__main__':
# intialize logging
logging.basicConfig(format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s',
datefmt='%d-%b-%y %H:%M:%S')
logger = logging.getLogger("main")
logger.setLevel(logging.INFO)
    # set up argument parsing
parser = argparse.ArgumentParser(prog='predict_densenet')
parser.add_argument('--num_tiles', dest='num_tiles', type=int, required=True)
# parse the arguments
args = parser.parse_args()
num_tiles = args.num_tiles
t0 = timeit.default_timer()
if not path.isdir(test_pred):
mkdir(test_pred)
models = []
logger.info('Loading densenet models...')
for it in range(4):
model = get_densenet121_unet_softmax((None, None), weights=None)
model.load_weights(path.join(models_folder, 'densenet_weights_{0}.h5'.format(it)))
models.append(model)
#print('Predicting test')
for d in tqdm(listdir(test_folder)):
logger.info('Predicting image: {}'.format(d))
fid = d
full_img = cv2.imread(path.join(test_folder, fid), cv2.IMREAD_COLOR)
        if num_tiles is None:
num_tiles = 1
f = factors(num_tiles)
X_TILE_SIZE = 512 * f[1]
Y_TILE_SIZE = 512 * f[0]
        predicted = np.zeros((full_img.shape[0], full_img.shape[1], 3), dtype='uint8')
import argparse
import numpy as np
import pandas as pd
from Bio import SearchIO, SeqIO
from pathlib import Path
"""
calculate identity, positive, and coverage considering missing residues
"""
def calc_coverage_from_pir(pir_file):
query_record, template_record = SeqIO.parse(pir_file, 'pir')
query_seq, template_seq = query_record.seq, template_record.seq
    template_not_gap_indices = np.where(np.array(template_seq) != '-')[0]
import sys
import open3d
import numpy as np
import time
import os
from geometric_registration.utils import get_pcd, get_keypts, get_desc, loadlog
import cv2
from functools import partial
def build_correspondence(source_desc, target_desc):
"""
Find the mutually closest point pairs in feature space.
    source and target are descriptor arrays for two point clouds' keypoints, e.g. [5000, 32].
"""
distance = np.sqrt(2 - 2 * (source_desc @ target_desc.T))
source_idx = np.argmin(distance, axis=1)
    source_dis = np.min(distance, axis=1)
from collections import OrderedDict
import numpy as np
import matplotlib as mpl
mpl.rcParams['hatch.linewidth']=2.0
import matplotlib.pyplot as plt
fs=25
plt.rc('xtick',labelsize=int(fs*.8))
plt.rc('ytick',labelsize=int(fs*.8))
import ROOT as rt
from ROOT import TH1F,TColor
import os
# FIXME SetYTitle
class BinaryClassifierResponse(object):
def __init__(self,
name,
title,
directory,
pt):
f=np.load("jj25{}.npz".format(pt))
self.bdt=f["bdtset"]
#cmult,nmult,ptd,axis1,axis2,
#cmult,nmult,ptd,axis1,axis2,
#dr,pt/pt,mult/mult
self.eve=f["eveset"]
self.pairlist=f["pairlist"]
self._name = name
self._title = title
self._path = os.path.join(directory, name + ".{ext}")
def append(self, pt,var):
res=30
maxv=self.bdt[:,var].max()
if(maxv<self.bdt[:,var].max()):
maxv=self.bdt[:,var].max()
print(pt)
if(pt==100):maxpt=175
if(pt==200):maxpt=357
if(pt==500):maxpt=874
if(pt==1000):maxpt=1750
if(pt==100):
ptmin=0.815*pt
ptmax=1.159*pt
if(pt==200):
ptmin=0.819*pt
ptmax=1.123*pt
if(pt==500):
ptmin=0.821*pt
ptmax=1.093*pt
if(pt==1000):
ptmin=0.8235*pt
ptmax=1.076*pt
if(var=="eta"):
#q1hist=rt.TH1F("q1hist","pp#rightarrowqq", res, -1,1)
#q2hist=rt.TH1F("q2hist","Z+jet", res, -1,1)
#g1hist=rt.TH1F("g1hist","dijet", res, -1,1)
#g2hist=rt.TH1F("g2hist","Z+jet", res, -1,1)
q1hist=rt.TH1F("q1hist","pp#rightarrowqq", res, -2.4,2.4)
q2hist=rt.TH1F("q2hist","Z+jet", res, -2.4,2.4)
g1hist=rt.TH1F("g1hist","dijet", res, -2.4,2.4)
g2hist=rt.TH1F("g2hist","Z+jet", res, -2.4,2.4)
elif(var=="pt"):
q1hist=rt.TH1F("q1hist","dijet", res, 0, maxpt)
q2hist=rt.TH1F("q2hist","Z+jet", res, 0, maxpt)
g1hist=rt.TH1F("g1hist","dijet", res, 0, maxpt)
g2hist=rt.TH1F("g2hist","Z+jet", res, 0, maxpt)
else:
maxv=int(0.8*maxv)
vb=int(maxv)
#if(pt==500 or pt==1000):vb=int(vb/2)
#vb=res
print("max",maxv)
q1hist=rt.TH1F("q1hist","dijet", vb,0, maxv)
q2hist=rt.TH1F("q2hist","Z+jet", vb,0, maxv)
g1hist=rt.TH1F("g1hist","dijet", vb,0, maxv)
g2hist=rt.TH1F("g2hist","Z+jet", vb,0, maxv)
res=int(vb)
for k in range(len(self.bdt)):
pair=np.argmax(self.eve[k])
pair=self.pairlist[np.argmax(self.eve[k])]
"""if(pair==0):
q1hist.Fill(self.bdt[k][var])
if(pair==1):
q2hist.Fill(self.bdt[k][var])
if(pair==2):
g1hist.Fill(self.bdt[k][var])
if(pair==3):
g2hist.Fill(self.bdt[k][var])"""
if(pair[0]=="q"):
q1hist.Fill(self.bdt[k][0])
if(pair[1]=="q"):
q2hist.Fill(self.bdt[k][5])
if(pair[0]=="g"):
g1hist.Fill(self.bdt[k][0])
if(pair[1]=="g"):
g2hist.Fill(self.bdt[k][5])
q1hist.Scale(1.0 / q1hist.Integral())
#q2hist.Scale(1.0 / q2hist.Integral())
g1hist.Scale(1.0 / g1hist.Integral())
#g2hist.Scale(1.0 / g2hist.Integral())
q1hist.Draw("hist")
q1=[]
q2=[]
g1=[]
g2=[]
for i in range(res):
q1.append(q1hist.GetBinContent(i+1))
q2.append(q2hist.GetBinContent(i+1))
g1.append(g1hist.GetBinContent(i+1))
g2.append(g2hist.GetBinContent(i+1))
print(g2hist.GetEntries())
fs=25
plt.figure(figsize=(12,8))
plt.title("jet $p_T$ range {}~{} GeV".format(pt,int(pt*1.1)),fontdict={"weight":"bold","size":fs*1.})
plt.ylabel("Fraction of Events",fontsize=fs*1.3)
if(var=='eta'):
xax=np.append(np.arange(-2.4,2.4,((-q1hist.GetBinLowEdge(0)+q1hist.GetBinLowEdge(res))/res))[:res],2.4)
plt.xlabel("jet $\eta$",fontsize=fs*1.3)
plt.fill_between(xax,np.append(q1,0),alpha=0.6,linewidth=2,facecolor='azure',edgecolor='C0',label=r"pp$\rightarrow$qq",step='post')
plt.plot(xax,np.append(q1,0),color='C0',alpha=0.5,drawstyle='steps-post',linewidth=2)
plt.plot(xax,np.append(q2,0),label=r"pp$\rightarrow$q2",color='blue',drawstyle='steps-post',linewidth=3)
plt.fill_between(xax,np.append(g1,0),alpha=0.3,linewidth=2,linestyle='--',facecolor="#ffcfdc",edgecolor='C1',label=r"pp$\rightarrow$gg",step='post')
#plt.plot(xax,np.append(g1,0),label=r"pp$\rightarrow$gg",color='C1',drawstyle='steps-post',linewidth=3,linestyle='--',alpha=0.6)
plt.plot(xax,np.append(g2,0),label=r"pp$\rightarrow$g2",color='red',drawstyle='steps-post',linewidth=3,linestyle='--')
else:
#if("chad" in var):plt.xlabel("Charged Particle Multiplicity",fontsize=fs*1.)
#elif("ptd" in var):plt.xlabel("jet $p_TD$",fontsize=fs*1.3)
#elif("axis" in var):plt.xlabel("jet {}".format(var),fontsize=fs*1.3)
#else:plt.xlabel("jet $p_T$(GeV)",fontsize=fs*1.3)
q1=np.concatenate([[0],q1])
q2=np.concatenate([[0],q2])
        g1=np.concatenate([[0],g1])
import time
import cv2
import numpy as np
import json
import tensorflow as tf
from scipy.spatial import distance as dist
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
from .model.yolo import Yolo
with open("config.json", "r") as file:
config = json.load(file)
class YoloSocialDistance:
def __init__(self, tiny=False):
self.__class_names = {0: "person"}
self.__show_masks = config["showMasks"]
self.__show_fps = config["showFPS"]
self.__show_scores = config["showScores"]
self.__score_threshold = config["scoreThreshold"]
self.__iou_threshold = config["iouThreshold"]
self.__write_detection = config["writeDetection"]
self.__min_distance = 0
print("[INFO]: Building model")
model = Yolo(80, tiny)
print("[INFO]: Model built succesfully")
print("[INFO]: Loading detector weights")
model.load_weights(config["weightsPath"])
print("[INFO]: Detector weights loaded")
self.pedestrian_detector = model.get_graph()
def detect_from_image(self, image_path):
self.__pixel_to_meter()
self.__get_perspecive_points()
frame = cv2.imread(image_path)
if frame is None:
print("\n[WARNING]: Please enter a valid image path")
return
self.__detect_frame(frame)
if self.__write_detection:
cv2.imwrite("prediction.jpg", frame)
if not config["dontShow"]:
cv2.imshow("Image", frame)
key = cv2.waitKey(0)
if key == ord("q"):
cv2.destroyAllWindows()
def detect_from_video(self, src=0):
self.__pixel_to_meter()
self.__get_perspecive_points()
cap = cv2.VideoCapture(src, cv2.CAP_ANY)
avg_fps = []
if self.__write_detection:
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(cap.get(cv2.CAP_PROP_FPS))
output = cv2.VideoWriter(
filename="prediction.avi",
apiPreference=cv2.CAP_ANY,
fourcc=cv2.VideoWriter_fourcc("M", "J", "P", "G"),
fps=fps,
frameSize=(width, height),
)
while True:
prev_time = time.time()
retval, frame = cap.read(0)
if not retval:
print("Can't receive frame (stream end?). Exiting ...")
break
self.__detect_frame(frame)
pts = np.array(
[
self.__circles[0],
self.__circles[1],
self.__circles[3],
self.__circles[2],
],
np.int32,
)
pts = pts.reshape((-1, 1, 2))
cv2.polylines(frame, [pts], True, (0, 255, 255))
if not config["dontShow"]:
cv2.imshow("Frame", frame)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
if self.__show_fps:
fps = int(1 / (time.time() - prev_time))
avg_fps.append(fps)
print("FPS: {}".format(fps))
if self.__write_detection:
output.write(frame)
cap.release()
cv2.destroyAllWindows()
if len(avg_fps) != 0:
avg_fps = sum(avg_fps) / len(avg_fps)
print(f"[INFO]: Average FPS: {avg_fps}")
def __detect_frame(self, frame):
image_data = frame.copy()
image_data = cv2.cvtColor(image_data, cv2.COLOR_BGR2RGB)
image_data = cv2.resize(image_data, (416, 416))
image_data = image_data / 255.0
image_data = image_data.astype("float32")
image_data = np.expand_dims(image_data, axis=0)
image_data = tf.constant(image_data)
prediction = self.pedestrian_detector(image_data)
boxes = prediction[0, :, 0:4]
pred_conf = prediction[0, :, 4:]
        boxes = np.reshape(boxes, (1, boxes.shape[0], boxes.shape[1]))
import numpy as np
def check_uv(u, v):
"""
Returns weightings for frequencies u and v
for anisotropic surfaces
"""
if abs(u) + abs(v) == 0:
return 4.
elif u * v == 0:
return 2.
return 1.
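# e.g. check_uv(0, 0) -> 4.0 (constant mode), check_uv(0, 3) -> 2.0,
#      check_uv(2, 1) -> 1.0 (generic mode)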
vcheck = np.vectorize(check_uv)
def wave_function(x, u, Lx):
"""
Wave in Fourier sum
"""
coeff = 2 * np.pi / Lx
if u >= 0:
return np.cos(coeff * u * x)
return np.sin(coeff * abs(u) * x)
def d_wave_function(x, u, Lx):
"""
First derivative of wave in Fourier sum wrt x
"""
coeff = 2 * np.pi / Lx
if u >= 0:
return - coeff * u * np.sin(coeff * u * x)
return coeff * abs(u) * np.cos(coeff * abs(u) * x)
def dd_wave_function(x, u, Lx):
"""
    Second derivative of wave in Fourier sum wrt x
"""
coeff = 2 * np.pi / Lx
return - coeff ** 2 * u ** 2 * wave_function(x, u, Lx)
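# Sanity check of the derivatives (hypothetical values): with Lx=10, u=1, x=0,
#     wave_function(0, 1, 10)    ->  1.0    (cos(0))
#     d_wave_function(0, 1, 10)  ->  0.0    (-coeff * sin(0))
#     dd_wave_function(0, 1, 10) -> -(2 * np.pi / 10) ** 2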
def cos_sin_indices(u_array):
"""Return indices of wave function arrays for
both cos and sin functions"""
cos_indices = np.argwhere(u_array >= 0)
    sin_indices = np.argwhere(u_array < 0)
import os
import random
from collections import namedtuple
import numpy as np
from tqdm import tqdm
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data._utils import collate
import wandb
def set_all_seeds(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
class ReplayBuffer:
def __init__(
self, replay_size, batch_size, device, observation_space, action_space
):
self.replay_size = replay_size
self.batch_size = batch_size
self.device = device
self.index = 0
self.full = False
self.states = np.empty((replay_size, observation_space), dtype=np.float32)
self.next_states = np.empty((replay_size, observation_space), dtype=np.float32)
        self.actions = np.empty((replay_size, 1), dtype=np.int64)
self.rewards = np.empty((replay_size, 1), dtype=np.float32)
self.dones = np.empty((replay_size, 1), dtype=np.float32)
def add(self, obs, action, reward, next_obs, done):
np.copyto(self.states[self.index], obs)
np.copyto(self.actions[self.index], action)
np.copyto(self.rewards[self.index], reward)
np.copyto(self.next_states[self.index], next_obs)
np.copyto(self.dones[self.index], float(done))
self.index = (self.index + 1) % self.replay_size
self.full = self.full or self.index == 0
def sample(self):
idxs = np.random.randint(
0, self.replay_size if self.full else self.index, size=self.batch_size
)
states = torch.as_tensor(self.states[idxs], device=self.device).float()
actions = torch.as_tensor(self.actions[idxs], device=self.device)
rewards = (
torch.as_tensor(self.rewards[idxs], device=self.device).squeeze().float()
)
next_states = torch.as_tensor(
self.next_states[idxs], device=self.device
).float()
dones = torch.as_tensor(self.dones[idxs], device=self.device).squeeze().float()
return states, actions, rewards, next_states, dones
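    # Minimal usage sketch (hypothetical sizes, CartPole-like spaces):
    #     buf = ReplayBuffer(replay_size=10000, batch_size=64, device="cpu",
    #                        observation_space=4, action_space=2)
    #     buf.add(obs, action, reward, next_obs, done)  # once per env step
    #     states, actions, rewards, next_states, dones = buf.sample()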
class MLP(nn.Module):
def __init__(self, observation_space, action_space):
super(MLP, self).__init__()
self.fc1 = nn.Linear(observation_space, 64)
self.fc2 = nn.Linear(64, 32)
self.fc3 = nn.Linear(32, action_space)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
class DQN:
def __init__(
self,
env,
rbuffer,
value,
target,
optimizer,
device,
eps_start,
eps_decay,
eps_end,
totalnum_iterations,
gamma,
update_target,
):
self.eps_start = eps_start
self.eps_decay = eps_decay
self.eps_end = eps_end
self.epsilon = max(self.eps_start * self.eps_decay, self.eps_end)
        self.batch_size = rbuffer.batch_size  # reuse the replay buffer's batch size
self.env = env
self.rbuffer = rbuffer
self.value = value
self.target = target
self.optimizer = optimizer
self.device = device
self.totalnum_iterations = totalnum_iterations
self.gamma = gamma
self.update_target = update_target
self.loss_fn = torch.nn.MSELoss()
@torch.no_grad()
def get_action(self, obs):
if np.random.rand() < self.epsilon:
            return self.env.action_space.sample()
else:
obs = torch.from_numpy(obs).float().unsqueeze(0).to(self.device)
return torch.argmax(self.value(obs)).item()
def train(self):
for episode in tqdm(range(0, self.totalnum_iterations)):
            obs = self.env.reset()
done = False
episode_reward = 0
while not done:
action = self.get_action(obs)
                new_obs, reward, done, _ = self.env.step(action)
self.rbuffer.add(obs, action, reward, new_obs, done)
episode_reward += reward
obs = new_obs
if self.rbuffer.index > self.batch_size:
states, actions, rewards, next_states, dones = self.rbuffer.sample()
next_qs = dones * rewards + (1 - dones) * (
rewards
+ self.gamma * self.target(next_states).detach().max(dim=1).values
)
current_qs = torch.gather(self.value(states), 1, actions).squeeze()
loss = self.loss_fn(current_qs, next_qs)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.epsilon = max(self.epsilon * self.eps_decay, self.eps_end)
wandb.log(
{
"Episode reward": episode_reward,
"Epsilon": self.epsilon,
"Loss": loss,
},
step=episode,
)
if episode % self.update_target == 0:
self.target.load_state_dict(self.value.state_dict())
def evaluate(self):
episode_rewards = []
for episode in range(10):
obs = self.env.reset()
done = False
episode_reward = 0
while not done:
action = self.get_action(obs)
new_obs, reward, done, _ = self.env.step(action)
self.env.render()
episode_reward += reward
obs = new_obs
episode_rewards.append(episode_reward)
print("Average episodes (10): ", np.mean(episode_rewards))
seed = np.random.randint(1000)
from __future__ import absolute_import, division, print_function
from collections import Iterable, defaultdict, deque
from functools import reduce
import numbers
import operator
import numpy as np
import scipy.sparse
try: # Windows compatibility
int = long
except NameError:
pass
class COO(object):
""" A Sparse Multidimensional Array
This is stored in COO format. It depends on NumPy and Scipy.sparse for
computation, but supports arrays of arbitrary dimension.
Parameters
----------
coords: np.ndarray (ndim, nnz)
An array holding the index locations of every value
Should have shape (number of dimensions, number of non-zeros)
data: np.array (nnz,)
An array of Values
shape: tuple (ndim,), optional
The shape of the array
Examples
--------
>>> x = np.eye(4)
>>> x[2, 3] = 5
>>> s = COO(x)
>>> s
<COO: shape=(4, 4), dtype=float64, nnz=5, sorted=True, duplicates=False>
>>> s.data
array([ 1., 1., 1., 5., 1.])
>>> s.coords
array([[0, 1, 2, 2, 3],
[0, 1, 2, 3, 3]], dtype=uint8)
>>> s.dot(s.T).sum(axis=0).todense()
array([ 1., 1., 31., 6.])
Make a sparse array by passing in an array of coordinates and an array of
values.
>>> coords = [[0, 0, 0, 1, 1],
... [0, 1, 2, 0, 3],
... [0, 3, 2, 0, 1]]
>>> data = [1, 2, 3, 4, 5]
>>> y = COO(coords, data, shape=(3, 4, 5))
>>> y
<COO: shape=(3, 4, 5), dtype=int64, nnz=5, sorted=False, duplicates=True>
>>> tensordot(s, y, axes=(0, 1))
<COO: shape=(4, 3, 5), dtype=float64, nnz=6, sorted=False, duplicates=False>
Following scipy.sparse conventions you can also pass these as a tuple with
rows and columns
>>> rows = [0, 1, 2, 3, 4]
>>> cols = [0, 0, 0, 1, 1]
>>> data = [10, 20, 30, 40, 50]
>>> z = COO((data, (rows, cols)))
>>> z.todense()
array([[10, 0],
[20, 0],
[30, 0],
[ 0, 40],
[ 0, 50]])
You can also pass a dictionary or iterable of index/value pairs. Repeated
indices imply summation:
>>> d = {(0, 0, 0): 1, (1, 2, 3): 2, (1, 1, 0): 3}
>>> COO(d)
<COO: shape=(2, 3, 4), dtype=int64, nnz=3, sorted=False, duplicates=False>
>>> L = [((0, 0), 1),
... ((1, 1), 2),
... ((0, 0), 3)]
>>> COO(L).todense()
array([[4, 0],
[0, 2]])
See Also
--------
COO.from_numpy
COO.from_scipy_sparse
"""
__array_priority__ = 12
def __init__(self, coords, data=None, shape=None, has_duplicates=True,
sorted=False, cache=False):
self._cache = None
if cache:
self.enable_caching()
if data is None:
# {(i, j, k): x, (i, j, k): y, ...}
if isinstance(coords, dict):
coords = list(coords.items())
has_duplicates = False
if isinstance(coords, np.ndarray):
result = COO.from_numpy(coords)
self.coords = result.coords
self.data = result.data
self.has_duplicates = result.has_duplicates
self.sorted = result.sorted
self.shape = result.shape
return
# []
if not coords:
data = []
coords = []
# [((i, j, k), value), (i, j, k), value), ...]
elif isinstance(coords[0][0], Iterable):
if coords:
assert len(coords[0]) == 2
data = [x[1] for x in coords]
coords = [x[0] for x in coords]
coords = np.asarray(coords).T
# (data, (row, col, slab, ...))
else:
data = coords[0]
coords = np.stack(coords[1], axis=0)
self.data = np.asarray(data)
self.coords = np.asarray(coords)
if self.coords.ndim == 1:
self.coords = self.coords[None, :]
if shape and not np.prod(self.coords.shape):
self.coords = np.zeros((len(shape), 0), dtype=np.uint64)
if shape is None:
if self.coords.nbytes:
shape = tuple((self.coords.max(axis=1) + 1).tolist())
else:
shape = ()
self.shape = tuple(shape)
if self.shape:
dtype = np.min_scalar_type(max(self.shape))
else:
dtype = np.int_
self.coords = self.coords.astype(dtype)
assert not self.shape or len(data) == self.coords.shape[1]
self.has_duplicates = has_duplicates
self.sorted = sorted
def enable_caching(self):
""" Enable caching of reshape, transpose, and tocsr/csc operations
This enables efficient iterative workflows that make heavy use of
csr/csc operations, such as tensordot. This maintains a cache of
recent results of reshape and transpose so that operations like
        tensordot (which uses both internally) can reuse efficiently stored
        representations instead of recomputing them. This can significantly cut down on
computational costs in common numeric algorithms.
However, this also assumes that neither this object, nor the downstream
objects will have their data mutated.
Examples
--------
>>> x.enable_caching() # doctest: +SKIP
>>> csr1 = x.transpose((2, 0, 1)).reshape((100, 120)).tocsr() # doctest: +SKIP
>>> csr2 = x.transpose((2, 0, 1)).reshape((100, 120)).tocsr() # doctest: +SKIP
>>> csr1 is csr2 # doctest: +SKIP
True
"""
self._cache = defaultdict(lambda: deque(maxlen=3))
return self
@classmethod
def from_numpy(cls, x):
if x.shape:
coords = np.where(x)
data = x[coords]
coords = np.vstack(coords)
else:
coords = []
data = x
return cls(coords, data, shape=x.shape, has_duplicates=False,
sorted=True)
def todense(self):
self = self.sum_duplicates()
x = np.zeros(shape=self.shape, dtype=self.dtype)
coords = tuple([self.coords[i, :] for i in range(self.ndim)])
x[coords] = self.data
return x
@classmethod
def from_scipy_sparse(cls, x):
x = scipy.sparse.coo_matrix(x)
coords = np.empty((2, x.nnz), dtype=x.row.dtype)
coords[0, :] = x.row
coords[1, :] = x.col
return COO(coords, x.data, shape=x.shape,
has_duplicates=not x.has_canonical_format,
sorted=x.has_canonical_format)
@property
def dtype(self):
return self.data.dtype
@property
def ndim(self):
return len(self.shape)
@property
def nnz(self):
return self.coords.shape[1]
@property
def nbytes(self):
return self.data.nbytes + self.coords.nbytes
def __sizeof__(self):
return self.nbytes
def __getitem__(self, index):
if not isinstance(index, tuple):
index = (index,)
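        # Map negative integer indices onto their positive equivalents.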
index = tuple(ind + self.shape[i] if isinstance(ind, numbers.Integral) and ind < 0 else ind
for i, ind in enumerate(index))
if (all(ind == slice(None) or ind == slice(0, d)
for ind, d in zip(index, self.shape))):
return self
mask = np.ones(self.nnz, dtype=bool)
for i, ind in enumerate([i for i in index if i is not None]):
if ind == slice(None, None):
continue
mask &= _mask(self.coords[i], ind)
n = mask.sum()
coords = []
shape = []
i = 0
for ind in index:
if isinstance(ind, numbers.Integral):
i += 1
continue
elif isinstance(ind, slice):
start = ind.start or 0
stop = ind.stop if ind.stop is not None else self.shape[i]
shape.append(min(stop, self.shape[i]) - start)
coords.append(self.coords[i][mask] - start)
i += 1
elif isinstance(ind, list):
old = self.coords[i][mask]
new = np.empty(shape=old.shape, dtype=old.dtype)
for j, item in enumerate(ind):
new[old == item] = j
coords.append(new)
shape.append(len(ind))
i += 1
elif ind is None:
coords.append(np.zeros(n))
shape.append(1)
for j in range(i, self.ndim):
coords.append(self.coords[j][mask])
shape.append(self.shape[j])
coords = np.stack(coords, axis=0)
shape = tuple(shape)
data = self.data[mask]
return COO(coords, data, shape=shape,
has_duplicates=self.has_duplicates,
sorted=self.sorted)
def __str__(self):
return "<COO: shape=%s, dtype=%s, nnz=%d, sorted=%s, duplicates=%s>" % (
self.shape, self.dtype, self.nnz, self.sorted,
self.has_duplicates)
__repr__ = __str__
def reduction(self, method, axis=None, keepdims=False, dtype=None):
if axis is None:
axis = tuple(range(self.ndim))
kwargs = {}
if dtype:
kwargs['dtype'] = dtype
if isinstance(axis, numbers.Integral):
axis = (axis,)
if set(axis) == set(range(self.ndim)):
result = getattr(self.data, method)(**kwargs)
else:
axis = tuple(axis)
neg_axis = list(range(self.ndim))
for ax in axis:
neg_axis.remove(ax)
neg_axis = tuple(neg_axis)
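            # Move the reduced axes to the front, collapse to a 2-D matrix of shape
            # (prod(reduced dims), prod(kept dims)), and reduce along axis 0 in scipy.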
a = self.transpose(axis + neg_axis)
a = a.reshape((np.prod([self.shape[d] for d in axis]),
np.prod([self.shape[d] for d in neg_axis])))
a = a.to_scipy_sparse()
a = getattr(a, method)(axis=0, **kwargs)
if isinstance(a, scipy.sparse.spmatrix):
a = COO.from_scipy_sparse(a)
a.sorted = self.sorted
a.has_duplicates = False
elif isinstance(a, np.matrix):
a = np.asarray(a)[0]
a = COO.from_numpy(a)
a = a.reshape([self.shape[d] for d in neg_axis])
result = a
if keepdims:
result = _keepdims(self, result, axis)
return result
def sum(self, axis=None, keepdims=False, dtype=None, out=None):
return self.reduction('sum', axis=axis, keepdims=keepdims, dtype=dtype)
def max(self, axis=None, keepdims=False, out=None):
x = self.reduction('max', axis=axis, keepdims=keepdims)
# TODO: verify that there are some missing elements in each entry
if isinstance(x, COO):
x.data[x.data < 0] = 0
return x
elif isinstance(x, np.ndarray):
x[x < 0] = 0
return x
else:
return np.max(x, 0)
def transpose(self, axes=None):
if axes is None:
axes = reversed(range(self.ndim))
axes = tuple(axes)
if axes == tuple(range(self.ndim)):
return self
if self._cache is not None:
for ax, value in self._cache['transpose']:
if ax == axes:
return value
shape = tuple(self.shape[ax] for ax in axes)
result = COO(self.coords[axes, :], self.data, shape,
has_duplicates=self.has_duplicates,
cache=self._cache is not None)
if self._cache is not None:
self._cache['transpose'].append((axes, result))
return result
@property
def T(self):
return self.transpose(list(range(self.ndim))[::-1])
def dot(self, other):
return dot(self, other)
def __matmul__(self, other):
try:
return dot(self, other)
except NotImplementedError:
return NotImplemented
def __rmatmul__(self, other):
try:
return dot(other, self)
except NotImplementedError:
return NotImplemented
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kwargs):
return NotImplemented
def linear_loc(self, signed=False):
""" Index location of every piece of data in a flattened array
This is used internally to check for duplicates, re-order, reshape,
etc..
"""
n = reduce(operator.mul, self.shape)
if signed:
n = -n
dtype = np.min_scalar_type(n)
out = np.zeros(self.nnz, dtype=dtype)
tmp = np.zeros(self.nnz, dtype=dtype)
strides = 1
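        # Row-major (C-order) linear index: out = sum_d coords[d] * stride_d, built
        # from the last dimension inwards so each stride is the product of the
        # trailing extents.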
for i, d in enumerate(self.shape[::-1]):
# out += self.coords[-(i + 1), :].astype(dtype) * strides
np.multiply(self.coords[-(i + 1), :], strides, out=tmp, dtype=dtype)
np.add(tmp, out, out=out)
strides *= d
return out
def reshape(self, shape):
if self.shape == shape:
return self
if any(d == -1 for d in shape):
extra = int(np.prod(self.shape) /
np.prod([d for d in shape if d != -1]))
shape = tuple([d if d != -1 else extra for d in shape])
if self.shape == shape:
return self
if self._cache is not None:
for sh, value in self._cache['reshape']:
if sh == shape:
return value
# TODO: this np.prod(self.shape) enforces a 2**64 limit to array size
linear_loc = self.linear_loc()
coords = np.empty((len(shape), self.nnz), dtype=np.min_scalar_type(max(shape)))
strides = 1
for i, d in enumerate(shape[::-1]):
coords[-(i + 1), :] = (linear_loc // strides) % d
strides *= d
result = COO(coords, self.data, shape,
has_duplicates=self.has_duplicates,
sorted=self.sorted, cache=self._cache is not None)
if self._cache is not None:
self._cache['reshape'].append((shape, result))
return result
def to_scipy_sparse(self):
assert self.ndim == 2
result = scipy.sparse.coo_matrix((self.data,
(self.coords[0],
self.coords[1])),
shape=self.shape)
result.has_canonical_format = (not self.has_duplicates and self.sorted)
return result
def _tocsr(self):
assert self.ndim == 2
# Pass 1: sum duplicates
self.sum_duplicates()
# Pass 2: sort indices
self.sort_indices()
row, col = self.coords
# Pass 3: count nonzeros in each row
indptr = np.zeros(self.shape[0] + 1, dtype=np.int64)
np.cumsum(np.bincount(row, minlength=self.shape[0]), out=indptr[1:])
return scipy.sparse.csr_matrix((self.data, col, indptr), shape=self.shape)
def tocsr(self):
if self._cache is not None:
try:
return self._csr
except AttributeError:
pass
try:
self._csr = self._csc.tocsr()
return self._csr
except AttributeError:
pass
self._csr = csr = self._tocsr()
else:
csr = self._tocsr()
return csr
def tocsc(self):
if self._cache is not None:
try:
return self._csc
except AttributeError:
pass
try:
self._csc = self._csr.tocsc()
return self._csc
except AttributeError:
pass
self._csc = csc = self.tocsr().tocsc()
else:
csc = self.tocsr().tocsc()
return csc
def sort_indices(self):
if self.sorted:
return
linear = self.linear_loc(signed=True)
if (np.diff(linear) > 0).all(): # already sorted
self.sorted = True
return self
order = np.argsort(linear)
self.coords = self.coords[:, order]
self.data = self.data[order]
self.sorted = True
return self
def sum_duplicates(self):
# Inspired by scipy/sparse/coo.py::sum_duplicates
# See https://github.com/scipy/scipy/blob/master/LICENSE.txt
if not self.has_duplicates:
return self
if not np.prod(self.coords.shape):
return self
self.sort_indices()
linear = self.linear_loc()
unique_mask = np.diff(linear) != 0
if unique_mask.sum() == len(unique_mask): # already unique
self.has_duplicates = False
return self
unique_mask = np.append(True, unique_mask)
coords = self.coords[:, unique_mask]
(unique_inds,) = np.nonzero(unique_mask)
data = np.add.reduceat(self.data, unique_inds, dtype=self.data.dtype)
self.data = data
self.coords = coords
self.has_duplicates = False
return self
def __add__(self, other):
if isinstance(other, numbers.Number) and other == 0:
return self
if not isinstance(other, COO):
return self.maybe_densify() + other
if self.shape == other.shape:
return self.elemwise_binary(operator.add, other)
else:
raise NotImplementedError("Broadcasting not yet supported")
def __radd__(self, other):
return self + other
def __neg__(self):
return COO(self.coords, -self.data, self.shape, self.has_duplicates,
self.sorted)
def __sub__(self, other):
return self + (-other)
def __rsub__(self, other):
return -self + other
def __mul__(self, other):
if isinstance(other, COO):
return self.elemwise_binary(operator.mul, other)
else:
return self.elemwise(operator.mul, other)
__rmul__ = __mul__
def __truediv__(self, other):
return self.elemwise(operator.truediv, other)
def __floordiv__(self, other):
return self.elemwise(operator.floordiv, other)
__div__ = __truediv__
def __pow__(self, other):
return self.elemwise(operator.pow, other)
def elemwise(self, func, *args, **kwargs):
if kwargs.pop('check', True) and func(0, *args, **kwargs) != 0:
raise ValueError("Performing this operation would produce "
"a dense result: %s" % str(func))
return COO(self.coords, func(self.data, *args, **kwargs),
shape=self.shape,
has_duplicates=self.has_duplicates,
sorted=self.sorted)
def elemwise_binary(self, func, other, *args, **kwargs):
assert isinstance(other, COO)
if kwargs.pop('check', True) and func(0, 0, *args, **kwargs) != 0:
raise ValueError("Performing this operation would produce "
"a dense result: %s" % str(func))
if self.shape != other.shape:
raise NotImplementedError("Broadcasting is not supported")
self.sum_duplicates() # TODO: document side-effect or make copy
other.sum_duplicates() # TODO: document side-effect or make copy
        # Sort self.coords in lexicographical order using record arrays
self_coords = np.rec.fromarrays(self.coords)
i = np.argsort(self_coords)
self_coords = self_coords[i]
self_data = self.data[i]
# Convert other.coords to a record array
        other_coords = np.rec.fromarrays(other.coords)
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 28 17:08:13 2015
@author: jordan
"""
import os
import numpy as np
from astropy.table import Table, hstack
from astropy.stats import sigma_clipped_stats
from astropy.wcs import WCS
from astropy.wcs.utils import proj_plane_pixel_area
from astropy import units as u
from astropy.coordinates import SkyCoord
from matplotlib import pyplot as plt
from photutils import daofind, aperture_photometry, CircularAperture, CircularAnnulus
import pdb
from pyPol import Image
#Setup the path delimeter for this operating system
delim = os.path.sep
# Grab all the *.fits files in the reduced science data directory
reducedDir = '/home/jordan/ThesisData/PRISM_Data/Reduced_data'
# Setup new directory for polarimetry data
polarimetryDir = reducedDir + delim + 'Polarimetry'
if (not os.path.isdir(polarimetryDir)):
os.mkdir(polarimetryDir, 0o755)
polAngDir = polarimetryDir + delim + 'polAngImgs'
if (not os.path.isdir(polAngDir)):
os.mkdir(polAngDir, 0o755)
stokesDir = polarimetryDir + delim + 'stokesImgs'
if (not os.path.isdir(stokesDir)):
os.mkdir(stokesDir, 0o755)
# Read in the indexFile data and select the filenames
print('\nReading file index from disk')
indexFile = 'fileIndex.csv'
fileIndex = Table.read(indexFile, format='csv')
#fileIndex = ascii.read(indexFile, guess=False, delimiter=',')
fileList = fileIndex['Filename']
# Determine which parts of the fileIndex pertain to science images
useFiles = np.logical_and((fileIndex['Use'] == 1), (fileIndex['Dither'] == 'ABBA'))
# Cull the file index to only include files selected for use
fileIndex = fileIndex[np.where(useFiles)]
# Group the fileIndex by...
# 1. Target
# 2. Waveband
# 3. Dither (pattern)
fileIndexByTarget = fileIndex.group_by(['Target', 'Waveband', 'Dither'])
# Define any required conversion constants
rad2deg = (180.0/np.pi)
deg2rad = (np.pi/180.0)
# Use the following data for final calibration
# ...
# Magnitude zero point from Bessel 1990, PASP 102, 1181
# Extinction from <NAME> & Mathis 1989, ApJ 345, 245 or Rieke and Lebosky 1985, ApJ
# ...
# Bands and zero point flux [in Jy = 10^(-26) W /(m^2 Hz)]
zeroFlux = dict(zip(['U', 'B', 'V', 'R'],
[1884, 4646, 3953, 2875]))
# Loop through each group
groupKeys = fileIndexByTarget.groups.keys
for group in fileIndexByTarget.groups:
# Grab the current target information
thisTarget = str(np.unique(group['Target'].data)[0])
thisWaveband = str(np.unique(group['Waveband'].data)[0])
numImgs = len(group)
    print('\nProcessing {0} images for'.format(numImgs))
print('\tTarget : {0}'.format(thisTarget))
print('\tWaveband : {0}'.format(thisWaveband))
# Calibrate stokes intensity from USNOB stars...
catalogFile = stokesDir + delim + thisTarget + '_stars.xml'
starCatalog = Table.read(catalogFile)
thisMag = thisWaveband + 'mag'
theseStars = np.logical_not(starCatalog[thisMag].mask)
# Cull the catalog to those entries with relevant photometric data
starCatalog = starCatalog[np.where(theseStars)]
# Form a "catalog" of position entries for matching
ra1 = starCatalog['RAJ2000']
dec1 = starCatalog['DEJ2000']
catalog1 = SkyCoord(ra = ra1, dec = dec1, frame = 'fk5')
    # Read in the image and find stars in the image
Ifile = (stokesDir + delim +
'_'.join([thisTarget, thisWaveband, 'I']) + '.fits')
stokesI = Image(Ifile)
mean, median, std = sigma_clipped_stats(stokesI.arr, sigma=3.0, iters=5)
threshold = median + 3.0*std
fwhm = 3.0
sources = daofind(stokesI.arr, threshold, fwhm, ratio=1.0, theta=0.0,
sigma_radius=1.5, sharplo=0.2, sharphi=1.0,
roundlo=-1.0, roundhi=1.0, sky=0.0,
exclude_border=True)
# Convert source positions to RA and Dec
wcs = WCS(stokesI.header)
ADstars = wcs.all_pix2world(sources['xcentroid'], sources['ycentroid'], 0)
catalog2 = SkyCoord(ra = ADstars[0]*u.deg, dec = ADstars[1]*u.deg, frame = 'fk5')
###
### This slow, meat-axe method was useful for verification.
### It produces the same results as the method below.
###
# # Loop through each of the detected sources, and check for possible confusion
# keepStars = []
# numCat1Match = []
# numCat2Match = []
# for i in range(len(catalog2)):
# # Establish the coordinates of the current star
# thisCoord = SkyCoord(ra = catalog2[i].ra, dec = catalog2[i].dec)
#
# # Compute the distances from this star to other stars in both catalogs
# d2d_cat1 = thisCoord.separation(catalog1)
# d2d_cat2 = thisCoord.separation(catalog2)
#
# # Check if there is more than one star nearby in EITHER catalog
# numCat1Match.append(np.sum(d2d_cat1 < 20*u.arcsec))
# numCat2Match.append(np.sum(d2d_cat2 < 20*u.arcsec))
# keepStars.append(numCat1Match[i] == 1 and numCat2Match[i] == 1)
    #Search for all possible matches within 20 arcsec of a given detected source
idxc1, idxc2, d2d, d3d = catalog2.search_around_sky(catalog1, 20*u.arcsec)
# Detect which matches from catalog2 to catalog1 are unique
bins, freq = np.unique(idxc2, return_inverse=True)
cat2Keep = np.bincount(freq) == 1
isoCatalog2 = bins[np.where(cat2Keep)]
    # Cull the detected sources catalog to only include good, isolated stars
xStars = sources['xcentroid'][isoCatalog2]
yStars = sources['ycentroid'][isoCatalog2]
# Now check for saturation near this star
saturatedStars = []
for xStar, yStar in zip(xStars, yStars):
        lf, rt = int(xStar) - 10, int(xStar) + 10
        bt, tp = int(yStar) - 10, int(yStar) + 10
        # numpy arrays index as [row (y), column (x)]
        patch = stokesI.arr[bt:tp, lf:rt]
saturatedStars.append(np.sum(np.logical_or(patch > 9e3, patch < -100)) > 1)
# Check for stars near the edge of the image
ny, nx = stokesI.arr.shape
edgeStars = np.logical_or(
np.logical_or(xStars < 40, xStars > (nx - 40)),
np.logical_or(yStars < 40, yStars > (ny - 40)))
# Now let's do aperture photometry on the remaining sources in the image
# 1. Setup the apertures
sourcePos = [xStars, yStars]
apertures = CircularAperture(sourcePos, r = 6.0)
annulus_apertures = CircularAnnulus(sourcePos, r_in=12., r_out=14.)
# 2. Perform the basic photometry
rawflux_table = aperture_photometry(stokesI.arr, apertures)
bkgflux_table = aperture_photometry(stokesI.arr, annulus_apertures)
phot_table = hstack([rawflux_table, bkgflux_table], table_names=['raw', 'bkg'])
# 3. Compute background contribution and subtract from raw photometry
bkg_mean = phot_table['aperture_sum_bkg'] / annulus_apertures.area()
bkg_sum = bkg_mean * apertures.area()
final_sum = phot_table['aperture_sum_raw'] - bkg_sum
phot_table['residual_aperture_sum'] = final_sum
    # Compute the signal-to-noise ratio and flag background-dominated stars (SNR < 1.0)
SNR = final_sum/bkg_sum
bkgDominated = SNR < 1.0
# ###########################################################################
# # PRINT OUT THE PHOTOMETRY TO CHECK FOR CONSISTENCY
# ###########################################################################
# xFmtStr = '{x[0]:>6}.{x[1]:<3}'
# yFmtStr = '{y[0]:>6}.{y[1]:<3}'
# starFmtStr = '{star[0]:>9}.{star[1]:<3}'
# bkgFmtStr = '{bkg[0]:>9}.{bkg[1]:<3}'
# snrFmtStr = '{snr[0]:>9}.{snr[1]:<3}'
# print('final photometry is...')
# print(' x y Star Flux Bkg Flux SNR')
# print('===========================================================')
# printStr = xFmtStr + yFmtStr + starFmtStr + bkgFmtStr + snrFmtStr
# for i in range(len(SNR)):
# xVal = str(xStars[i]).split('.')
# xVal[1] = (xVal[1])[0:3]
# yVal = str(yStars[i]).split('.')
# yVal[1] = (yVal[1])[0:3]
# starVal = str(final_sum[i]).split('.')
# starVal[1] = (starVal[1])[0:3]
# bkgVal = str(bkg_sum[i]).split('.')
# bkgVal[1] = (bkgVal[1])[0:3]
# snrVal = str(SNR[i]).split('.')
# snrVal[1] = (snrVal[1])[0:3]
# print(printStr.
# format(x = xVal, y = yVal, star = starVal, bkg = bkgVal, snr = snrVal))
    # Cull the lists to just keep
# non-saturated,
# non-edge,
# non-background-dominated stars
    keepStars = np.logical_not(
        np.logical_or(
            np.logical_or(saturatedStars, edgeStars), bkgDominated))
"""Wild Life
An interactive version of Conway’s Life Game, written in Python.
Source and description:
https://github.com/pawelbudziszewski/Wild-Life
Copyright 2021, 2022 <NAME>
This is an interactive version of Conway’s Life Game, written in Python.
It allows placing different life forms using mouse while game is running.
How to run it:
On Windows just execute 'python3.exe wild_life.py'
In theory this code should run fine also on Linux, but I did not test it.
How to use it:
- Mouse-click anywhere to insert species
- Mouse-click on the species list in the bottom to select species to
be inserted
- [1], [2], [3], [4] keys to change color map
- [Esc] to exit
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
import random
import numpy as np
import cv2
from scipy.signal import convolve2d
import life_forms
## Configuration
# Size of the world (number of grid cells)
WIDTH=600
HEIGHT=300
# Magnification - each cell will be displayed as NxN pixels.
N=2
# Do we want to wrap the world? If yes, everything that moves outside
# of the border will appear at opposite border.
WRAP_WORLD = False
# Dying life cells leaves shadows. FADE_COEFFICIENT determines,
# how long this shadow will last. Greater value - longer shadow
# lifetime. Value of 0.0 means no shadows.
# This value should be larger or equal to 0 and lower than 1 - other
# values are technically possible, but may give awkward results (have
# fun testing).
FADE_COEFFICIENT = 0.6
# Color maps we can use
COLOR_MAPS = [cv2.COLORMAP_BONE,
cv2.COLORMAP_HOT,
cv2.COLORMAP_OCEAN,
cv2.COLORMAP_PINK
]
# Initial color map
INITIAL_COLOR_MAP = 0
# Life forms to be used in menu (see life_forms.py)
SPECIES_MENU_ITEMS = [
life_forms.GOSPER_GLIDER_GUN,
life_forms.GLIDERS,
life_forms.TURTLE_R,
life_forms.BLIMKER_PUFFER,
life_forms.KOK_GALAXY,
life_forms.PULSAR,
life_forms.BLANK,
]
# Initially selected life form
INITIAL_SPECIES_MENU_ITEM = 0
class WildLife:
""" Life class"""
def __init__(self):
self.W = WIDTH
self.H = HEIGHT
self.N = N
self.current_species = INITIAL_SPECIES_MENU_ITEM
# This will be our arena
self.world = np.zeros([self.H,self.W])
# Arena transformed into image to be displayed
self.world_img=np.ones([self.H,self.W])*0.5
self.color_map_id = INITIAL_COLOR_MAP
self.generate_menu()
def generate_image(self):
""" Generate OpenCV image based on game's world and menu
"""
        img = 1.0 - self.world_img
remapped = cv2.applyColorMap((img*255).astype(np.uint8), COLOR_MAPS[self.color_map_id])
remapped = np.vstack((remapped,self.menu))
if self.N>1:
remapped=remapped.repeat(self.N, axis=0).repeat(self.N, axis=1)
return remapped
def life_step(self):
""" One step of life cycle
"""
if WRAP_WORLD:
boundary = 'wrap'
else:
boundary = 'fill'
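        # Convolving with a 3x3 kernel of ones sums each cell's 3x3 neighbourhood;
        # subtracting the cell itself leaves the classic 8-neighbour count.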
        neighbors_count = convolve2d(self.world, np.ones((3, 3)),
                                     mode='same', boundary=boundary) - self.world
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME> and <NAME>
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from model.config import cfg
import roi_data_layer.roidb as rdl_roidb
from roi_data_layer.layer import RoIDataLayer
from utils.timer import Timer
import keras as k
try:
import cPickle as pickle
except ImportError:
import pickle
import numpy as np
import os
import sys
import glob
import time
import math
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from model.bbox_transform import clip_boxes, bbox_transform_inv
from utils.cython_bbox import bbox_overlaps
def _clip_boxes(boxes, im_shape):
"""Clip boxes to image boundaries."""
# x1 >= 0
boxes[:, 0] = np.maximum(boxes[:, 0], 0)
# y1 >= 0
boxes[:, 1] = np.maximum(boxes[:, 1], 0)
# x2 < im_shape[1]
boxes[:, 2] = np.minimum(boxes[:, 2], im_shape[1] - 1)
# y2 < im_shape[0]
boxes[:, 3] = np.minimum(boxes[:, 3], im_shape[0] - 1)
return boxes
class SolverWrapper(object):
"""
A wrapper class for the training process
"""
def __init__(self, sess, network, imdb, roidb, valroidb, output_dir = "", tbdir = "", pretrained_model=""):
self.net = network
self.imdb = imdb
self.roidb = roidb
self.valroidb = valroidb
self.output_dir = output_dir
self.tbdir = tbdir
# Simply put '_val' at the end to save the summaries from the validation set
self.tbvaldir = tbdir + '_val'
if not os.path.exists(self.tbvaldir):
os.makedirs(self.tbvaldir)
self.pretrained_model = pretrained_model
def snapshot(self, sess, iter):
net = self.net
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
# Store the model snapshot
filename = cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_{:d}'.format(iter) + '.ckpt'
filename = os.path.join(self.output_dir, filename)
self.saver.save(sess, filename)
print('Wrote snapshot to: {:s}'.format(filename))
# Also store some meta information, random state, etc.
nfilename = cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_{:d}'.format(iter) + '.pkl'
nfilename = os.path.join(self.output_dir, nfilename)
# current state of numpy random
st0 = np.random.get_state()
# current position in the database
cur = self.data_layer._cur
# current shuffled indexes of the database
perm = self.data_layer._perm
# current position in the validation database
cur_val = self.data_layer_val._cur
# current shuffled indexes of the validation database
perm_val = self.data_layer_val._perm
# Dump the meta info
with open(nfilename, 'wb') as fid:
pickle.dump(st0, fid, pickle.HIGHEST_PROTOCOL)
pickle.dump(cur, fid, pickle.HIGHEST_PROTOCOL)
pickle.dump(perm, fid, pickle.HIGHEST_PROTOCOL)
pickle.dump(cur_val, fid, pickle.HIGHEST_PROTOCOL)
pickle.dump(perm_val, fid, pickle.HIGHEST_PROTOCOL)
pickle.dump(iter, fid, pickle.HIGHEST_PROTOCOL)
return filename, nfilename
def from_snapshot(self, sess, sfile, nfile):
print('Restoring model snapshots from {:s}'.format(sfile))
init = tf.global_variables_initializer()
sess.run(init)
self.saver.restore(sess, sfile)
print('Restored.')
# Needs to restore the other hyper-parameters/states for training, (TODO xinlei) I have
# tried my best to find the random states so that it can be recovered exactly
# However the Tensorflow state is currently not available
with open(nfile, 'rb') as fid:
st0 = pickle.load(fid)
cur = pickle.load(fid)
perm = pickle.load(fid)
cur_val = pickle.load(fid)
perm_val = pickle.load(fid)
last_snapshot_iter = pickle.load(fid)
np.random.set_state(st0)
return last_snapshot_iter
def get_variables_in_checkpoint_file(self, file_name):
try:
reader = pywrap_tensorflow.NewCheckpointReader(file_name)
var_to_shape_map = reader.get_variable_to_shape_map()
return var_to_shape_map
except Exception as e: # pylint: disable=broad-except
print(str(e))
if "corrupted compressed block contents" in str(e):
print("It's likely that your checkpoint file has been compressed "
"with SNAPPY.")
def construct_graph(self, sess):
with sess.graph.as_default():
# Set the random seed for tensorflow
tf.set_random_seed(cfg.RNG_SEED)
# Build the main computation graph
layers = self.net.create_architecture('TRAIN', self.imdb.num_classes, tag='default',
anchor_scales=cfg.ANCHOR_SCALES,
anchor_ratios=cfg.ANCHOR_RATIOS)
# Define the loss
loss = layers['total_loss']
# Set learning rate and momentum
lr = tf.Variable(cfg.TRAIN.LEARNING_RATE, trainable=False)
self.optimizer = tf.train.MomentumOptimizer(lr, cfg.TRAIN.MOMENTUM)
#self.optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
# Compute the gradients with regard to the loss
gvs = self.optimizer.compute_gradients(loss)
# Double the gradient of the bias if set
if cfg.TRAIN.DOUBLE_BIAS:
final_gvs = []
with tf.variable_scope('Gradient_Mult') as scope:
for grad, var in gvs:
scale = 1.
if cfg.TRAIN.DOUBLE_BIAS and '/biases:' in var.name:
scale *= 2.
if not np.allclose(scale, 1.0):
grad = tf.multiply(grad, scale)
final_gvs.append((grad, var))
train_op = self.optimizer.apply_gradients(final_gvs)
else:
train_op = self.optimizer.apply_gradients(gvs)
# Write the train and validation information to tensorboard
self.writer = tf.summary.FileWriter(self.tbdir, sess.graph)
self.valwriter = tf.summary.FileWriter(self.tbvaldir)
return lr, train_op
def find_previous(self, max_iters):
sfiles = os.path.join(self.output_dir, cfg.TRAIN.SNAPSHOT_LOAD_PREFIX + '_iter_*.ckpt.meta')
sfiles = glob.glob(sfiles)
sfiles.sort(key=os.path.getmtime)
# Get the snapshot name in TensorFlow
redfiles = []
for stepsize in range(0, max_iters, cfg.TRAIN.SNAPSHOT_ITERS):
redfiles.append(os.path.join(self.output_dir, cfg.TRAIN.SNAPSHOT_LOAD_PREFIX + '_iter_{:d}.ckpt.meta'.format(stepsize)))
sfiles = [ss.replace('.meta', '') for ss in sfiles if ss in redfiles]
nfiles = os.path.join(self.output_dir, cfg.TRAIN.SNAPSHOT_LOAD_PREFIX + '_iter_*.pkl')
nfiles = glob.glob(nfiles)
nfiles.sort(key=os.path.getmtime)
redfiles = [redfile.replace('.ckpt.meta', '.pkl') for redfile in redfiles]
nfiles = [nn for nn in nfiles if nn in redfiles]
lsf = len(sfiles)
assert len(nfiles) == lsf
return lsf, nfiles, sfiles
def initialize(self, sess):
# Initial file lists are empty
np_paths = []
ss_paths = []
# Fresh train directly from ImageNet weights
print('Loading initial model weights from {:s}'.format(self.pretrained_model))
variables = tf.global_variables()
# Initialize all variables first
sess.run(tf.variables_initializer(variables, name='init'))
var_keep_dic = self.get_variables_in_checkpoint_file(self.pretrained_model)
# Get the variables to restore, ignoring the variables to fix
variables_to_restore = self.net.get_variables_to_restore(variables, var_keep_dic)
init = tf.global_variables_initializer()
sess.run(init)
restorer = tf.train.Saver(variables_to_restore)
restorer.restore(sess, self.pretrained_model)
print('Loaded.')
# Need to fix the variables before loading, so that the RGB weights are changed to BGR
# For VGG16 it also changes the convolutional weights fc6 and fc7 to
# fully connected weights
self.net.fix_variables(sess, self.pretrained_model)
print('Fixed.')
last_snapshot_iter = 0
rate = cfg.TRAIN.LEARNING_RATE
stepsizes = list(cfg.TRAIN.STEPSIZE)
return rate, last_snapshot_iter, stepsizes, np_paths, ss_paths
def restore(self, sess, sfile, nfile):
# Get the most recent snapshot and restore
np_paths = [nfile]
ss_paths = [sfile]
# Restore model from snapshots
last_snapshot_iter = self.from_snapshot(sess, sfile, nfile)
self.net.fix_variables(sess, sfile)
print('Fixed.')
# Set the learning rate
rate = cfg.TRAIN.LEARNING_RATE
stepsizes = cfg.TRAIN.STEPSIZE
return rate, last_snapshot_iter, stepsizes, np_paths, ss_paths
def remove_snapshot(self, np_paths, ss_paths):
to_remove = len(np_paths) - cfg.TRAIN.SNAPSHOT_KEPT
for c in range(to_remove):
nfile = np_paths[0]
os.remove(str(nfile))
np_paths.remove(nfile)
to_remove = len(ss_paths) - cfg.TRAIN.SNAPSHOT_KEPT
for c in range(to_remove):
sfile = ss_paths[0]
# To make the code compatible to earlier versions of Tensorflow,
# where the naming tradition for checkpoints are different
if os.path.exists(str(sfile)):
os.remove(str(sfile))
else:
os.remove(str(sfile + '.data-00000-of-00001'))
os.remove(str(sfile + '.index'))
sfile_meta = sfile + '.meta'
os.remove(str(sfile_meta))
ss_paths.remove(sfile)
def train_model(self, sess, max_iters, just_test = False):
# Build data layers for both training and validation set
self.data_layer = RoIDataLayer(self.roidb, self.imdb.num_classes)
self.data_layer_val = RoIDataLayer(self.valroidb, self.imdb.num_classes, random=True)
# Construct the computation graph
lr, train_op = self.construct_graph(sess)
# Find previous snapshots if there is any to restore from
lsf, nfiles, sfiles = self.find_previous(max_iters)
# Initialize the variables or restore them from the last snapshot
if lsf == 0:
variables = tf.global_variables()
var_keep_dic = self.get_variables_in_checkpoint_file(self.pretrained_model)
variables_to_restore = self.net.get_variables_to_restore(variables, var_keep_dic)
self.saver = tf.train.Saver(variables_to_restore, max_to_keep=100000, reshape=True)
rate, last_snapshot_iter, stepsizes, np_paths, ss_paths = self.initialize(sess)
else:
variables = tf.global_variables()
var_keep_dic = self.get_variables_in_checkpoint_file(sfiles[-1])
variables_to_restore = self.net.get_variables_to_restore(variables, var_keep_dic)
self.saver = tf.train.Saver(variables_to_restore, max_to_keep=100000, reshape=True)
rate, last_snapshot_iter, stepsizes, np_paths, ss_paths = self.restore(sess,
str(sfiles[-1]),
str(nfiles[-1]))
self.saver = tf.train.Saver(max_to_keep=100000, reshape=True)
iter = 1
np_paths = []
ss_paths = []
# Make sure the lists are not empty
stepsizes.append(max_iters)
stepsizes.reverse()
next_stepsize = stepsizes.pop()
best_result = 0.0
sess.run(tf.assign(lr, rate))
while iter < max_iters + 1:
if iter == next_stepsize + 1:
# Add snapshot here before reducing the learning rate
rate *= cfg.TRAIN.GAMMA
sess.run(tf.assign(lr, rate))
next_stepsize = stepsizes.pop()
if not just_test:
self.run_epoch(iter, self.data_layer, "train", sess, lr, train_op)
result = self.run_epoch(iter, self.data_layer_val, "validation", sess, lr, None)
else:
self.run_epoch(iter, self.data_layer_val, "test", sess, lr, None)
result = - 1.0
# Snapshotting
if iter % cfg.TRAIN.SNAPSHOT_ITERS == 0 and not just_test:
last_snapshot_iter = iter
ss_path, np_path = self.snapshot(sess, iter)
np_paths.append(np_path)
ss_paths.append(ss_path)
# Remove the old snapshots if there are too many
try:
if len(np_paths) > cfg.TRAIN.SNAPSHOT_KEPT:
self.remove_snapshot(np_paths, ss_paths)
except:
print("failed to remove snapshot")
if result > best_result:
self.snapshot(sess, 0)
best_result = result
print(">>> best_result %f" % (best_result))
iter += 1
if last_snapshot_iter != iter - 1 and not just_test:
self.snapshot(sess, iter - 1)
self.writer.close()
self.valwriter.close()
def run_epoch(self, epoch, data_layer, name, sess, lr, train_op):
accum_results = None
accum_losses = None
epoch_iter = 0.0
timer = Timer()
# Get training data, one batch at a time
while True:
timer.tic()
blobs, new_epoch = data_layer.forward()
if new_epoch and epoch_iter != 0.0:
print_stat(name, epoch, epoch_iter, lr, accum_results, accum_losses)
sub_iou = float(accum_results['sub_iou']) / accum_results['total']
obj_iou = float(accum_results['obj_iou']) / accum_results['total']
return (sub_iou + obj_iou) / 2
if blobs["query"].shape[0] == 0 or blobs["gt_boxes"].shape[0] == 0:
continue
# Compute the graph without summary
try:
losses, predictions, proposal_targets = self.net.train_step(sess, blobs, train_op)
if math.isnan(losses["total_loss"]):
print("total loss is nan - iter %d" % (int(epoch_iter)))
continue
gt_bbox = blobs['gt_boxes']
gt = blobs['gt_labels'][:, :, 0]
im = blobs["im_info"]
gt_ent = blobs['partial_entity_class']
gt_rel = blobs['partial_relation_class']
boxes0 = predictions["rois"][:, 1:5]
# Apply bounding-box regression deltas
pred_boxes = np.reshape(predictions["pred_bbox_gpi"], [predictions["pred_bbox_gpi"].shape[0], -1])
box_deltas = pred_boxes * np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS) + np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS)
pred_boxes = bbox_transform_inv(boxes0, box_deltas)
pred_boxes = _clip_boxes(pred_boxes, im)
# normalize boxes to [0-1]
gt_bbox_norm = gt_bbox.copy()
for i in range(4):
pred_boxes[:, i] /= im[(i+1)%2]
gt_bbox_norm[:, i] /= im[(i+1)%2]
boxes0[:, i] /= im[(i+1)%2]
rpn_results = rpn_test(gt_bbox_norm, boxes0, pred_boxes, gt_ent, gt_rel, predictions)
                if accum_results is None:
first_result = True
accum_results = rpn_results
accum_losses = losses
else:
first_result = False
for key in rpn_results:
accum_results[key] += rpn_results[key]
for key in losses:
accum_losses[key] += losses[key]
for query_index in range(gt.shape[0]):
results = iou_test(gt[query_index], gt_bbox_norm,
proposal_targets["labels_mask"],
proposal_targets["labels"][query_index, :, 0],
predictions["cls_pred_gpi"][query_index],
predictions["cls_prob_gpi"][query_index],
pred_boxes, blobs['im_info'])
results_baseline = iou_test(gt[query_index], gt_bbox_norm,
proposal_targets["labels_mask"],
proposal_targets["labels"][query_index, :, 0],
predictions["cls_pred_baseline"][query_index],
predictions["cls_prob_baseline"][query_index],
boxes0, blobs['im_info'])
# accumulate results
if first_result:
for key in results:
accum_results[key] = results[key]
for key in results_baseline:
accum_results[key + "_bl"] = results_baseline[key]
first_result = False
else:
for key in results:
accum_results[key] += results[key]
for key in results_baseline:
accum_results[key + "_bl"] += results_baseline[key]
epoch_iter += 1.0
except Exception as e:
print(e)
print("error iter %d" % (int(epoch_iter)))
continue
timer.toc()
# Display training information
if (epoch == 1 and int(epoch_iter) % (cfg.TRAIN.DISPLAY) == 0) or (int(epoch_iter) % (10000) == 0):
print_stat(name, epoch, epoch_iter, lr, accum_results, accum_losses)
def print_stat(name, epoch, epoch_iter, lr, accum_results, losses):
sub_iou = float(accum_results['sub_iou']) / accum_results['total']
obj_iou = float(accum_results['obj_iou']) / accum_results['total']
sub_kl = float(accum_results['sub_kl']) / accum_results['total']
obj_kl = float(accum_results['obj_kl']) / accum_results['total']
acc = float(accum_results['acc']) / accum_results['total']
prec0 = float(accum_results['prec0']) / (accum_results['prec0_total'] + 1.0)
prec1 = float(accum_results['prec1']) / (accum_results['prec1_total'] + 1.0)
prec2 = float(accum_results['prec2']) / (accum_results['prec2_total'] + 1.0)
prec3 = float(accum_results['prec3']) / (accum_results['prec3_total'] + 1.0)
recall0 = float(accum_results['recall0']) / (accum_results['recall0_total'] + 1.0)
recall1 = float(accum_results['recall1']) / (accum_results['recall1_total'] + 1.0)
recall2 = float(accum_results['recall2']) / (accum_results['recall2_total'] + 1.0)
recall3 = float(accum_results['recall3']) / (accum_results['recall3_total'] + 1.0)
try:
f10 = 2 * recall0 * prec0 / (recall0 + prec0)
f11 = 2 * recall1 * prec1 / (recall1 + prec1)
f12 = 2 * recall2 * prec2 / (recall2 + prec2)
f13 = 2 * recall3 * prec3 / (recall3 + prec3)
except:
f10 = 0
f11 = 0
f12 = 0
f13 = 0
sub_iou_bl = float(accum_results['sub_iou_bl']) / accum_results['total_bl']
obj_iou_bl = float(accum_results['obj_iou_bl']) / accum_results['total_bl']
sub_kl_bl = float(accum_results['sub_kl_bl']) / accum_results['total_bl']
obj_kl_bl = float(accum_results['obj_kl_bl']) / accum_results['total_bl']
acc_bl = float(accum_results['acc_bl']) / accum_results['total_bl']
prec0_bl = float(accum_results['prec0_bl']) / (accum_results['prec0_total_bl'] + 1.0)
prec1_bl = float(accum_results['prec1_bl']) / (accum_results['prec1_total_bl'] + 1.0)
prec2_bl = float(accum_results['prec2_bl']) / (accum_results['prec2_total_bl'] + 1.0)
prec3_bl = float(accum_results['prec3_bl']) / (accum_results['prec3_total_bl'] + 1.0)
recall0_bl = float(accum_results['recall0_bl']) / (accum_results['recall0_total_bl'] + 1.0)
recall1_bl = float(accum_results['recall1_bl']) / (accum_results['recall1_total_bl'] + 1.0)
recall2_bl = float(accum_results['recall2_bl']) / (accum_results['recall2_total_bl'] + 1.0)
recall3_bl = float(accum_results['recall3_bl']) / (accum_results['recall3_total_bl'] + 1.0)
try:
f10_bl = 2 * recall0_bl * prec0_bl / (recall0_bl + prec0_bl)
f11_bl = 2 * recall1_bl * prec1_bl / (recall1_bl + prec1_bl)
f12_bl = 2 * recall2_bl * prec2_bl / (recall2_bl + prec2_bl)
f13_bl = 2 * recall3_bl * prec3_bl / (recall3_bl + prec3_bl)
except:
f10_bl = 0
f11_bl = 0
f12_bl = 0
f13_bl = 0
print('\n###### %s (%s): epoch %d iter: %d, total loss: %.4f lr: %f' % \
(name, cfg.TRAIN.SNAPSHOT_PREFIX, epoch, int(epoch_iter), losses["total_loss"] / epoch_iter, lr.eval()))
print('###scene-graph')
print(">>> loss_entity_gt: %.6f loss_relation_gt: %.6f acc_entity_gt: %.4f acc_relation_gt %.4f" % (losses["ent_cross_entropy0"] / epoch_iter, losses["rel_cross_entropy0"] / epoch_iter, accum_results["gt_sg_entity_acc"] / epoch_iter, accum_results["gt_sg_relation_acc"] / epoch_iter))
print(">>> loss_entity: %.6f loss_relation: %.6f acc_entity: %.4f acc_relation %.4f" % (losses["ent_cross_entropy"] / epoch_iter, losses["rel_cross_entropy"] / epoch_iter, accum_results["sg_entity_acc"] / epoch_iter, accum_results["sg_relation_acc"] / epoch_iter))
print('###rpn')
print('>>> rpn_loss_cls: %.6f rpn_loss_box: %.6f loss_box: %.6f' % (losses["rpn_cross_entropy"] / epoch_iter, losses["rpn_loss_box"] / epoch_iter, losses["loss_box"] / epoch_iter))
print('###gpi loss: %.6f' % (losses["cross_entropy_gpi"] / epoch_iter))
print('sub_iou: %.4f obj_iou: %.4f sub_kl: %.4f obj_kl: %.4f rpn_overlaps: %.4f' % (sub_iou, obj_iou, sub_kl, obj_kl, accum_results["gpi_overlaps"] / epoch_iter))
print('acc: %.4f ' % (acc))
print('recall0: %.4f recall1: %.4f recall2: %.4f recall3: %.4f' % (recall0, recall1, recall2, recall3))
print('prec0: %.4f prec1: %.4f prec2: %.4f prec3: %.4f' % (prec0, prec1, prec2, prec3))
print('f10: %.4f f11: %.4f f12: %.4f f13: %.4f' % (f10, f11, f12, f13))
print('###baseline loss: %.6f' % (losses["cross_entropy_baseline"] / epoch_iter))
print('sub_iou_bl: %.4f obj_iou_bl: %.4f sub_kl_bl: %.4f obj_kl_bl: %.4f rpn_overlaps_bl: %.4f' % (sub_iou_bl, obj_iou_bl, sub_kl_bl, obj_kl_bl, accum_results["baseline_overlaps"] / epoch_iter))
print('acc_bl: %.4f' % (acc_bl))
print('recall0_bl: %.4f recall1_bl: %.4f recall2_bl: %.4f recall3_bl: %.4f' % (recall0_bl, recall1_bl, recall2_bl, recall3_bl))
print('prec0_bl: %.4f prec1_bl: %.4f prec2_bl: %.4f prec3_bl: %.4f' % (prec0_bl, prec1_bl, prec2_bl, prec3_bl))
print('f10_bl: %.4f f11_bl: %.4f f12_bl: %.4f f13_bl: %.4f' % (f10_bl, f11_bl, f12_bl, f13_bl))
def get_training_roidb(imdb):
"""Returns a roidb (Region of Interest database) for use in training."""
if cfg.TRAIN.USE_FLIPPED:
print('Appending horizontally-flipped training examples...')
imdb.append_flipped_images()
print('done')
print('Preparing data...')
rdl_roidb.prepare_roidb(imdb)
print('done')
return imdb.roidb
def filter_roidb(roidb):
"""Remove roidb entries that have no usable RoIs."""
def is_valid(entry):
# Valid images have:
# (1) At least one foreground RoI OR
# (2) At least one background RoI
overlaps = entry['max_overlaps']
# find boxes with sufficient overlap
fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# image is only valid if such boxes exist
valid = len(fg_inds) > 0 or len(bg_inds) > 0
return valid
num = len(roidb)
filtered_roidb = [entry for entry in roidb if is_valid(entry)]
num_after = len(filtered_roidb)
print('Filtered {} roidb entries: {} -> {}'.format(num - num_after,
num, num_after))
return roidb
def train_net(network, imdb, roidb, valroidb, output_dir, tb_dir,
pretrained_model=None,
max_iters=100, just_test=False):
"""Train a Faster R-CNN network."""
#pretrained_model="output/res101/VisualGenome/default/gpir_imagenet_baseline_iter_7.ckpt"
#just_test=True
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True
with tf.Session(config=tfconfig) as sess:
sw = SolverWrapper(sess, network, imdb, roidb, valroidb, output_dir, tb_dir,
pretrained_model=pretrained_model)
print('Solving...')
sw.train_model(sess, max_iters,just_test)
print('done solving')
MASK_WIDTH = 14
MASK_HEIGHT = 14
def softmax(x):
    # Subtract the row-wise max before exponentiating for numerical stability.
    xexp = np.exp(x - np.max(x, axis=-1, keepdims=True))
    return xexp / np.sum(xexp, axis=-1, keepdims=True)
def rpn_test(gt_bbox, pred_boxes0, pred_boxes, gt_ent, gt_rel, predictions):
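    # bbox_overlaps yields an IoU matrix of shape (num_gt, num_pred); argmax/max
    # along axis 1 assign each ground-truth box its best-matching predicted box.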
overlaps0 = bbox_overlaps(np.ascontiguousarray(gt_bbox, dtype=np.float), np.ascontiguousarray(pred_boxes0, dtype=np.float))
overlaps0_assign = np.argmax(overlaps0, axis=1)
max_overlaps0 = overlaps0.max(axis=1)
overlaps = bbox_overlaps(np.ascontiguousarray(gt_bbox, dtype=np.float), np.ascontiguousarray(pred_boxes, dtype=np.float))
overlaps_assign = np.argmax(overlaps, axis=1)
max_overlaps = overlaps.max(axis=1)
pred_ent = predictions['ent_cls_score']
pred_ent = softmax(pred_ent[overlaps_assign])
ent_accuracy = np.sum(np.multiply(pred_ent, gt_ent)) / np.sum(gt_ent)
pred_rel = predictions['rel_cls_score']
pred_rel = softmax(pred_rel[overlaps_assign,:][:,overlaps_assign])
rel_accuracy = np.sum(np.multiply(pred_rel, gt_rel)) / np.sum(gt_rel)
pred_ent0 = predictions['ent_cls_score0']
pred_ent0 = softmax(pred_ent0)
ent0_accuracy = np.sum(np.multiply(pred_ent0, gt_ent)) / np.sum(gt_ent)
pred_rel0 = predictions['rel_cls_score0']
pred_rel0 = softmax(pred_rel0)
rel0_accuracy = np.sum(np.multiply(pred_rel0, gt_rel)) / np.sum(gt_rel)
results = {}
results["baseline_overlaps"] = np.mean(max_overlaps0)
results["gpi_overlaps"] = np.mean(max_overlaps)
results["sg_entity_acc"] = ent_accuracy
results["gt_sg_entity_acc"] = ent0_accuracy
results["sg_relation_acc"] = rel_accuracy
results["gt_sg_relation_acc"] = rel0_accuracy
return results
def iou_test(gt, gt_bbox, pred_mask, pred_label, pred, pred_prob, pred_bbox, im_info):
results = {}
# number of objects
results["total"] = 1
results["sub_iou"] = 0.0
results["obj_iou"] = 0.0
results["sub_kl"] = 0.0
results["obj_kl"] = 0.0
# accuracy
results["acc"] = np.sum(pred == pred_label).astype(float) / pred_label.shape[0]
# precision
for i in range(4):
total = np.sum(np.logical_and(pred == i, pred_mask != 0)).astype(float)
if total != 0:
results["prec" + str(i)] = np.sum(np.logical_and(np.logical_and(pred == pred_label, pred_label == i), pred_mask != 0)).astype(float) / total
results["prec" + str(i) + "_total"] = 1.0
else:
results["prec" + str(i)] = 0.0
results["prec" + str(i) + "_total"] = 0.0
# recall
for i in range(4):
total = np.sum(np.logical_and(pred_label == i, pred_mask != 0)).astype(float)
if total != 0:
results["recall" + str(i)] = np.sum(np.logical_and(np.logical_and(pred == pred_label, pred_label == i), pred_mask != 0)).astype(float) / total
results["recall" + str(i) + "_total"] = 1.0
else:
results["recall" + str(i)] = 0.0
results["recall" + str(i) + "_total"] = 0.0
width = MASK_WIDTH
height = MASK_HEIGHT
MASK_SHAPE = (width, height)
mask_sub_gt = np.zeros(MASK_SHAPE, dtype=float)
mask_obj_gt = np.zeros(MASK_SHAPE, dtype=float)
mask_sub_pred = np.zeros(MASK_SHAPE, dtype=float)
mask_obj_pred = np.zeros(MASK_SHAPE, dtype=float)
mask_sub_pred_bool = np.zeros(MASK_SHAPE, dtype=bool)
mask_obj_pred_bool = np.zeros(MASK_SHAPE, dtype=bool)
# sub and obj bool mask
i = np.argmax(pred_prob[:, 1])
mask_sub_pred_bool[int(math.floor(pred_bbox[i][0] * MASK_SHAPE[0])):int(math.ceil(pred_bbox[i][2] * MASK_SHAPE[0])),
int(math.floor(pred_bbox[i][1] * MASK_SHAPE[1])):int(math.ceil(pred_bbox[i][3] * MASK_SHAPE[1]))] = True
i = np.argmax(pred_prob[:, 2])
mask_obj_pred_bool[int(math.floor(pred_bbox[i][0] * MASK_SHAPE[0])):int(math.ceil(pred_bbox[i][2] * MASK_SHAPE[0])),
int(math.floor(pred_bbox[i][1] * MASK_SHAPE[1])):int(math.ceil(pred_bbox[i][3] * MASK_SHAPE[1]))] = True
for i in range(pred.shape[0]):
if pred[i] == 1:
x1 = int(math.floor(pred_bbox[i][0] * MASK_SHAPE[0]))
x2 = int(math.ceil(pred_bbox[i][2] * MASK_SHAPE[0]))
y1 = int(math.floor(pred_bbox[i][1] * MASK_SHAPE[1]))
y2 = int(math.ceil(pred_bbox[i][3] * MASK_SHAPE[1]))
mask_sub_pred_bool[x1:x2, y1:y2] = True
if pred[i] == 2:
x1 = int(math.floor(pred_bbox[i][0] * MASK_SHAPE[0]))
x2 = int(math.ceil(pred_bbox[i][2] * MASK_SHAPE[0]))
y1 = int(math.floor(pred_bbox[i][1] * MASK_SHAPE[1]))
y2 = int(math.ceil(pred_bbox[i][3] * MASK_SHAPE[1]))
mask_obj_pred_bool[x1:x2, y1:y2] = True
# GT mask
for i in range(gt.shape[0]):
if gt[i] == 1:
x1 = int(math.floor(gt_bbox[i][0] * MASK_SHAPE[0]))
x2 = int(math.ceil(gt_bbox[i][2] * MASK_SHAPE[0]))
y1 = int(math.floor(gt_bbox[i][1] * MASK_SHAPE[1]))
y2 = int(math.ceil(gt_bbox[i][3] * MASK_SHAPE[1]))
mask_sub_gt[x1:x2, y1:y2] = 1.0
if gt[i] == 2:
x1 = int(math.floor(gt_bbox[i][0] * MASK_SHAPE[0]))
x2 = int(math.ceil(gt_bbox[i][2] * MASK_SHAPE[0]))
y1 = int(math.floor(gt_bbox[i][1] * MASK_SHAPE[1]))
y2 = int(math.ceil(gt_bbox[i][3] * MASK_SHAPE[1]))
mask_obj_gt[x1:x2, y1:y2] = 1.0
# predicted mask
for i in range(pred.shape[0]):
x1 = int(math.floor(pred_bbox[i][0] * MASK_SHAPE[0]))
x2 = int(math.ceil(pred_bbox[i][2] * MASK_SHAPE[0]))
y1 = int(math.floor(pred_bbox[i][1] * MASK_SHAPE[1]))
y2 = int(math.ceil(pred_bbox[i][3] * MASK_SHAPE[1]))
mask = np.zeros(MASK_SHAPE, dtype=float)
mask[x1:x2, y1:y2] = 1.0
mask_sub_pred = np.maximum(mask_sub_pred, mask * pred_prob[i, 1])
mask_obj_pred = np.maximum(mask_obj_pred, mask * pred_prob[i, 2])
sub_iou = iou(mask_sub_gt.astype(bool), mask_sub_pred_bool)
obj_iou = iou(mask_obj_gt.astype(bool), mask_obj_pred_bool)
sub_kl = kl(mask_sub_gt, mask_sub_pred)
obj_kl = kl(mask_obj_gt, mask_obj_pred)
results["sub_iou"] += sub_iou
results["obj_iou"] += obj_iou
results["sub_kl"] += sub_kl
results["obj_kl"] += obj_kl
return results
def iou(mask_a, mask_b):
union = np.sum(np.logical_or(mask_a, mask_b))
if union == 0:
return 0.0
intersection = np.sum(np.logical_and(mask_a, mask_b))
return float(intersection) / float(union)
def kl(mask_gt, mask_pred):
gt = mask_gt.astype(float) / (np.sum(mask_gt) + k.backend.epsilon())
    pred = mask_pred.astype(float) / (np.sum(mask_pred) + k.backend.epsilon())
import numpy as np
from seisig import*
def create_timewedgeModel(seismic, model, dtc, Theta, Beta, tBottom, tTop, wedgeAngle, Q=False):
print('\n\n Cálculos del modelo de cuña sintético\n\n')
FreqVel = model.vp[0]
for z in range(seismic.zLen):
print(('CDP #{} de {}').format(z+1, seismic.zLen))
for x in range(seismic.xTraces):
R = ReflectivityS( ns = seismic.ySamples )
#top reflector
digitize_wedge(R, model, dtc, Theta[z][x], Beta[z][x], tTop[z][x], 'top', wedgeAngle)
Wv = Wavelet(wtype='bp', wf=[5, 10, 40, 80], duration=0.28, wdt=seismic.dt)
#Wv = Wavelet(wtype='r', wf=75)
if Q:
Wv.apply_Q(tTop[z][x], FreqVel)
trace = Trace(wavelet = Wv, rseries = R.rserie)
#base reflector
R = ReflectivityS( ns = seismic.ySamples )
digitize_wedge(R, model, dtc, Theta[z][x], Beta[z][x], tBottom[z][x], 'base', wedgeAngle)
Wv = Wavelet(wtype='bp', wf=[5, 10, 40, 80], duration=0.28, wdt=seismic.dt)
#Wv = Wavelet(wtype='r', wf=75)
if Q:
Wv.apply_Q(tBottom[z][x], FreqVel)
traceB = Trace(wavelet = Wv, rseries = R.rserie)
trace+= traceB
#calculation made on individual trace are made here
seismic.add_trace(trace, x, z)
def Xmin(angmax, topDepth):
return -np.tan(angmax) * topDepth
def Xmax(angmax, topDepth, gamma, dhmax):
return topDepth * np.tan(angmax) + dhmax / np.tan(gamma)
def XonWedge(angle_in, topDepth, Xsrc):
xw = Xsrc + topDepth * np.tan(angle_in)
    if np.any(xw < -0.1):
        raise ValueError('Something went wrong on XonWedge')
    return xw
def DHtop(angle_in, topDepth, gamma, Xsrc):
return XonWedge(angle_in, topDepth, Xsrc) * np.tan(gamma)
def AlphaTr(angle_in, V1, V2):
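    # Snell's law at the top interface: sin(alpha_t)/V2 = sin(angle_in)/V1.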
return np.arcsin(np.sin(angle_in) * V2 / V1)
def BetaBase(angle_in, gamma, V1, V2):
return AlphaTr(angle_in, V1, V2) + gamma
def PsiTr(angle_in, gamma, V1, V2):
return np.arcsin(np.sin(DeltaUp(angle_in, gamma, V1, V2)) * V1/V2)
def DeltaUp(angle_in, gamma, V1, V2):
return 2 * BetaBase(angle_in, gamma, V1, V2) - AlphaTr(angle_in, V1, V2)
def P1(angle_in, topDepth):
return topDepth / np.cos(angle_in)
def P2(angle_in, topDepth, gamma, Xsrc, V1, V2):
return ( np.cos(gamma) * DHtop(angle_in, topDepth, gamma, Xsrc) / \
np.cos(BetaBase(angle_in, gamma, V1, V2)) )
def XbaseWedge(angle_in, topDepth, gamma, Xsrc, V1, V2):
xb = XonWedge(angle_in, topDepth, Xsrc) + np.sin(AlphaTr(angle_in, V1, V2)) * \
P2(angle_in, topDepth, gamma, Xsrc, V1, V2)
    if np.any(xb < -0.1):
        raise ValueError('Something went wrong on XbaseWedge')
    return xb
def DHbase(angle_in, topDepth, gamma, Xsrc, V1, V2):
return XbaseWedge(angle_in, topDepth, gamma, Xsrc, V1, V2) * np.tan(gamma)
def P3(angle_in, topDepth, gamma, Xsrc, V1, V2):
return np.cos(AlphaTr(angle_in, V1, V2)) * P2(angle_in, topDepth, gamma, Xsrc, V1, V2) \
/ np.cos(2 * gamma + AlphaTr(angle_in, V1, V2))
def P4(angle_in, topDepth, gamma, V1, V2):
return topDepth / np.cos(PsiTr(angle_in, gamma, V1, V2))
def fullOffset(angle_in, topDepth, gamma, Xsrc, V1, V2):
return P1(angle_in, topDepth) * np.sin(angle_in) + P3(angle_in, topDepth, gamma, Xsrc, V1, V2)\
* np.sin(2 * BetaBase(angle_in, gamma, V1, V2)) / np.cos(AlphaTr(angle_in, V1, V2)) + \
np.tan(PsiTr(angle_in, gamma, V1, V2)) * topDepth
def ThetaEquiv(angle_in, topDepth, gamma, Xsrc, V1, V2):
return np.arctan(0.5 * fullOffset(angle_in, topDepth, gamma, Xsrc, V1, V2) / topDepth)
def wedge_shotgather(gamma, radmax_downwards, radmax_upwards, angstep, topDepth, velocities, X):
v1, v2 = velocities[0], velocities[1] #m/s
Angles_in = np.zeros(0, dtype='float')
Angles_top = np.zeros(0, dtype='float')
Angles_base = np.zeros(0, dtype='float')
rad_in = -np.arctan(X / topDepth) #ang min
while True:
beta = BetaBase(rad_in, gamma, v1, v2)
delta = DeltaUp(rad_in, gamma, v1, v2)
theta = ThetaEquiv(rad_in, topDepth, gamma, X, v1, v2)
Angles_in = np.append(Angles_in, rad_in)
Angles_base = np.append(Angles_base, beta)
Angles_top = np.append(Angles_top, theta)
if ((theta >= radmax_downwards) or (delta >= radmax_upwards)):
break
rad_in += angstep
RayPath_top = 2 * topDepth / np.cos(Angles_top)
RayPath_base1 = P1(Angles_in, topDepth) + P4(Angles_in, topDepth, gamma, v1, v2)
RayPath_base2 = P2(Angles_in, topDepth, gamma, X, v1, v2) + \
P3(Angles_in, topDepth, gamma, X, v1, v2)
RayPath_base = RayPath_base1 + RayPath_base2
TopTime = RayPath_top / v1
BaseTime = ((RayPath_base1 / v1) + (RayPath_base2 / v2))
CDPtop = XonWedge(Angles_top, topDepth, X)
CDPbase = XbaseWedge(Angles_in, topDepth, gamma, X, v1, v2)
TopDH = DHtop(Angles_top, topDepth, gamma, X)
BaseDH = DHbase(Angles_in, topDepth, gamma, X, v1, v2)
return Angles_top, Angles_base, RayPath_top, RayPath_base, TopTime, BaseTime, TopDH, BaseDH, \
CDPtop, CDPbase
def wedge_array_maker(model, wedgeSlope, dhmax, maxAng, topDepth, nsrc=500):
import warnings
warnings.filterwarnings("error")
velocities = model.vp
v1, v2 = velocities[0], velocities[1] #m/s
gamma = np.radians(wedgeSlope)
try:
radmax_downwards = min(np.arcsin(v1/v2), np.radians(maxAng))
except:
radmax_downwards = np.radians(maxAng)
try:
radmax_upwards = min(np.arcsin(v2/v1), DeltaUp(radmax_downwards, gamma, v1, v2))
except:
radmax_upwards = DeltaUp(radmax_downwards, gamma, v1, v2)
srcMin = Xmin(radmax_downwards, topDepth)
srcMax = Xmax(radmax_downwards, topDepth, gamma, dhmax)
XsrcVector = np.linspace(srcMin, srcMax, nsrc)
XsrcStep = XsrcVector[-1] - XsrcVector[-2]
angStep = np.arctan(XsrcStep / topDepth)
#print(XsrcVector)
sizeX = int(np.ceil(1+2 * radmax_downwards/angStep))
spanSize = np.zeros(XsrcVector.size, dtype='int')
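    # Each shot gather traces a different number of rays, so pad every per-shot
    # array with NaNs to a common width; the NaNs are filtered out on return.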
for i in range(XsrcVector.size):
th, be, ru, rl, tt, tb, dhu, dhl, cdpu, cdpl = wedge_shotgather(gamma, radmax_downwards, \
radmax_upwards, angStep, topDepth, velocities, XsrcVector[i])
spanSize[i] = th.size
if i == 0:
TH = np.nan * np.ones(sizeX, dtype='float')
TH[:th.size] = th
BE = np.nan * np.ones(sizeX, dtype='float')
BE[:be.size] = be
RU = np.nan * np.ones(sizeX, dtype='float')
RU[:ru.size] = ru
RL = np.nan * np.ones(sizeX, dtype='float')
RL[:rl.size] = rl
TT = np.nan * np.ones(sizeX, dtype='float')
TT[:tt.size] = tt
TB = np.nan * np.ones(sizeX, dtype='float')
TB[:tb.size] = tb
DHU = np.nan * np.ones(sizeX, dtype='float')
DHU[:dhu.size] = dhu
DHL = np.nan * np.ones(sizeX, dtype='float')
DHL[:dhl.size] = dhl
CDPU = np.nan * np.ones(sizeX, dtype='float')
CDPU[:cdpu.size] = cdpu
CDPL = np.nan * np.ones(sizeX, dtype='float')
CDPL[:cdpl.size] = cdpl
X = np.nan * np.ones(sizeX, dtype='float')
X[:cdpl.size] = XsrcVector[i] * np.ones(cdpl.size, dtype='float')
else:
aux = np.nan * np.ones(sizeX, dtype='float')
aux[:th.size] = th
TH = np.vstack([TH, aux])
aux[:be.size] = be
BE = np.vstack([BE, aux])
aux[:ru.size] = ru
RU = np.vstack([RU, aux])
aux[:rl.size] = rl
RL = np.vstack([RL, aux])
aux[:tt.size] = tt
TT = np.vstack([TT, aux])
aux[:tb.size] = tb
TB = np.vstack([TB, aux])
aux[:dhu.size] = dhu
DHU = np.vstack([DHU, aux])
aux[:dhl.size] = dhl
DHL = np.vstack([DHL, aux])
aux[:cdpu.size] = cdpu
CDPU = np.vstack([CDPU, aux])
aux[:cdpl.size] = cdpl
CDPL = np.vstack([CDPL, aux])
aux[:cdpl.size] = XsrcVector[i] * np.ones(cdpl.size, dtype='float')
X = np.vstack([X, aux])
del(aux)
return TH[~np.isnan(TH)], BE[~np.isnan(BE)], RU[~np.isnan(RU)], RL[~np.isnan(RL)], TT[~np.isnan(TT)], \
TB[~np.isnan(TB)], DHU[~np.isnan(DHU)], DHL[~np.isnan(DHL)], CDPU[~np.isnan(CDPU)], \
CDPL[~np.isnan(CDPL)], X[~np.isnan(X)], XsrcStep, XsrcVector
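# Example usage (a minimal sketch, not from the original source): the wedge
# routines above only assume a `model` object exposing a `vp` velocity list,
# so a simple namespace can stand in for it. All numeric arguments below are
# illustrative assumptions.
def _example_wedge_run():
    from types import SimpleNamespace
    model = SimpleNamespace(vp=[2000.0, 3000.0])  # layer velocities in m/s (assumed)
    # 5-degree wedge, 1 m vertical-resolution limit, 40-degree maximum
    # incidence angle, 500 m top depth, 50 source positions
    th, be, ru, rl, tt, tb, dhu, dhl, cdpu, cdpl, x, src_step, srcs = \
        wedge_array_maker(model, wedgeSlope=5.0, dhmax=1.0, maxAng=40.0,
                          topDepth=500.0, nsrc=50)
    return tt, tb  # top- and base-reflection traveltimes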
def CDPgather(srcspacing, cdpMax, CDParray, th, be, ru, rl, tt, tb, dhu, dhl):
    '''
    All input arrays should be reshaped to 1-D before calling.
    '''
cdpRanges = np.arange(0.0, cdpMax + 2 * srcspacing, srcspacing)
zSize = cdpRanges.size-1
cdpVector = np.zeros([zSize, 2])
TH = np.zeros([zSize, zSize], dtype='float')
BE = np.zeros([zSize, zSize], dtype='float')
RU = np.zeros([zSize, zSize], dtype='float')
RL = np.zeros([zSize, zSize], dtype='float')
TT = np.zeros([zSize, zSize], dtype='float')
TB = np.zeros([zSize, zSize], dtype='float')
DHU = np.zeros([zSize, zSize], dtype='float')
    DHL = np.zeros([zSize, zSize], dtype='float')
"""Tests of the homogeneity module"""
import unittest
import dcor
import numpy as np
class TestEnergyTest(unittest.TestCase):
"""Tests for the homogeneity energy test function."""
def test_same_distribution_same_parameters(self):
"""
Test that the test works on equal distributions.
As the distributions are the same, the test should not reject
the null hypothesis.
"""
vector_size = 10
num_samples = 100
mean = np.zeros(vector_size)
cov = np.eye(vector_size)
random_state = np.random.RandomState(0)
a = random_state.multivariate_normal(mean=mean,
cov=cov,
size=num_samples)
b = random_state.multivariate_normal(mean=mean,
cov=cov,
size=num_samples)
significance = 0.01
num_resamples = int(3 / significance + 1)
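        # With B resamples the smallest attainable p-value is 1 / (B + 1), so
        # B > 3 / significance resolves p-values comfortably below the
        # threshold (a presumed rationale, not stated in the code).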
result = dcor.homogeneity.energy_test(
a, b, num_resamples=num_resamples, random_state=random_state)
self.assertGreater(result.p_value, significance)
def test_same_distribution_different_means(self):
"""
Test that the test works on distributions with different means.
As the distributions are not the same, the test should reject
the null hypothesis.
"""
vector_size = 10
num_samples = 100
mean_0 = np.zeros(vector_size)
mean_1 = np.ones(vector_size)
cov = np.eye(vector_size)
random_state = np.random.RandomState(0)
a = random_state.multivariate_normal(mean=mean_0, cov=cov,
size=num_samples)
b = random_state.multivariate_normal(mean=mean_1, cov=cov,
size=num_samples)
significance = 0.01
num_resamples = int(3 / significance + 1)
result = dcor.homogeneity.energy_test(
a, b, num_resamples=num_resamples, random_state=random_state)
self.assertLess(result.p_value, significance)
def test_same_distribution_different_covariances(self):
"""
Test that the test works on distributions with different covariance.
As the distributions are not the same, the test should reject
the null hypothesis.
"""
vector_size = 10
num_samples = 100
mean = np.zeros(vector_size)
cov_0 = np.eye(vector_size)
cov_1 = 3 * np.eye(vector_size)
random_state = np.random.RandomState(0)
a = random_state.multivariate_normal(mean=mean, cov=cov_0,
size=num_samples)
b = random_state.multivariate_normal(mean=mean, cov=cov_1,
size=num_samples)
significance = 0.01
num_resamples = int(3 / significance + 1)
result = dcor.homogeneity.energy_test(
a, b, num_resamples=num_resamples, random_state=random_state)
self.assertLess(result.p_value, significance)
def test_different_distributions(self):
"""
Test that the test works on different distributions.
As the distributions are not the same, the test should reject
the null hypothesis.
"""
num_samples = 100
        random_state = np.random.RandomState(0)
        # Assumed completion: draw the two samples from clearly different
        # distributions, so the test should reject the null hypothesis.
        a = random_state.standard_normal(size=(num_samples, 1))
        b = random_state.standard_exponential(size=(num_samples, 1))
        significance = 0.01
        num_resamples = int(3 / significance + 1)
        result = dcor.homogeneity.energy_test(
            a, b, num_resamples=num_resamples, random_state=random_state)
        self.assertLess(result.p_value, significance)
from fastconv import corr1d
import numpy as np
import os
TESTCASE_PATH = 'testcases'
MULTIDATA_MULTICHAN_FNAME = 'multidata_multichan.npy'
MULTICHAN_FILTERS_FNAME = 'multifilters.npy'
FILTER_PATH = os.path.join(TESTCASE_PATH, MULTICHAN_FILTERS_FNAME)
DATA_PATH = os.path.join(TESTCASE_PATH, MULTIDATA_MULTICHAN_FNAME)
FILTERS = np.load(FILTER_PATH)
DATA = np.load(DATA_PATH)
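# Every test below follows the same pattern: build a reference result with
# np.correlate, run the corr1d kernel under test, then compare dtype, shape
# and values. A small helper capturing that shared pattern (a sketch for
# illustration; the tests keep their explicit asserts and do not call it):
def assert_matches_reference(reference, candidate, rtol=1e-3, atol=1e-2):
    assert candidate.dtype == reference.dtype, \
        'dtype is {0}, should be {1}'.format(candidate.dtype, reference.dtype)
    assert candidate.shape == reference.shape, \
        'shape is {0}, should be {1}'.format(candidate.shape, reference.shape)
    assert np.allclose(reference, candidate, rtol=rtol, atol=atol)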
def test_corr1D_AA_double():
test_data = DATA[0, 35, :].astype(np.float64)
test_filter = FILTERS[0, 35, :].astype(np.float64)
comparison = np.correlate(test_data, test_filter, mode='valid') # type: np.ndarray
my_output = corr1d.short_filter_correlate1D(test_data, test_filter) # type: np.ndarray
assert my_output.dtype == np.float64, 'data type should be np.float64'
assert comparison.shape == my_output.shape, 'shape is {0}, should be {1}'.format(my_output.shape,
comparison.shape)
assert np.allclose(comparison, my_output, rtol=1e-3, atol=1e-2)
def test_corr1D_AA_float():
test_data = DATA[0, 35, :].astype(np.float32)
test_filter = FILTERS[0, 35, :].astype(np.float32)
comparison = np.correlate(test_data, test_filter, mode='valid')
my_output = corr1d.short_filter_correlate1D(test_data, test_filter)
assert my_output.dtype == np.float32, 'data type should be np.float32'
assert comparison.shape == my_output.shape, 'shape is {0}, should be {1}'.format(my_output.shape,
comparison.shape)
assert np.allclose(comparison, my_output, rtol=1e-3, atol=1e-2)
def test_corr1D_AB_double():
test_data = DATA[0, 35, 1000:2001].astype(np.float64)
test_filter = FILTERS[0, 35, :].astype(np.float64)
comparison = np.correlate(test_data, test_filter, mode='valid') # type: np.ndarray
my_output = corr1d.short_filter_correlate1D(test_data, test_filter) # type: np.ndarray
assert my_output.dtype == np.float64, 'data type should be np.float64'
assert comparison.shape == my_output.shape, 'shape is {0}, should be {1}'.format(my_output.shape,
comparison.shape)
assert np.allclose(comparison, my_output, rtol=1e-3, atol=1e-2)
def test_corr1D_AB_float():
test_data = DATA[0, 35, 1000:2001].astype(np.float32)
test_filter = FILTERS[0, 35, :].astype(np.float32)
comparison = np.correlate(test_data, test_filter, mode='valid')
my_output = corr1d.short_filter_correlate1D(test_data, test_filter)
assert my_output.dtype == np.float32, 'data type should be np.float32'
assert comparison.shape == my_output.shape, 'shape is {0}, should be {1}'.format(my_output.shape,
comparison.shape)
assert np.allclose(comparison, my_output, rtol=1e-3, atol=1e-2)
def test_corr1D_AC_double():
test_data = FILTERS[0, 35, :].astype(np.float64)
test_filter = FILTERS[0, 35, :].astype(np.float64)
comparison = np.correlate(test_data, test_filter, mode='valid') # type: np.ndarray
my_output = corr1d.short_filter_correlate1D(test_data, test_filter) # type: np.ndarray
assert my_output.dtype == np.float64, 'data type should be np.float64'
assert comparison.shape == my_output.shape, 'shape is {0}, should be {1}'.format(my_output.shape,
comparison.shape)
assert np.allclose(comparison, my_output, rtol=1e-3, atol=1e-2)
def test_corr1D_AC_float():
test_data = FILTERS[0, 35, :].astype(np.float32)
test_filter = FILTERS[0, 35, :].astype(np.float32)
comparison = np.correlate(test_data, test_filter, mode='valid')
my_output = corr1d.short_filter_correlate1D(test_data, test_filter)
assert my_output.dtype == np.float32, 'data type should be np.float32'
assert comparison.shape == my_output.shape, 'shape is {0}, should be {1}'.format(my_output.shape,
comparison.shape)
assert np.allclose(comparison, my_output, rtol=1e-3, atol=1e-2)
def test_single_filter_multidata_1D_A_double():
test_data = DATA[0, ...].astype(np.float64)
test_filter = FILTERS[0, 35, :].astype(np.float64)
comparison = np.zeros((test_data.shape[0], test_data.shape[1] - test_filter.shape[0] + 1),
dtype=np.float64)
for i in range(test_data.shape[0]):
comparison[i, ...] = np.correlate(test_data[i, :], test_filter)
my_output = corr1d.single_filter_multiple_data_correlate1D(test_data, test_filter)
assert my_output.dtype == np.float64
assert my_output.shape == comparison.shape
assert np.allclose(comparison, my_output, rtol=1e-3, atol=1e-2)
def test_single_filter_multidata_1D_A_float():
test_data = DATA[0, ...].astype(np.float32)
test_filter = FILTERS[0, 35, :].astype(np.float32)
comparison = np.zeros((test_data.shape[0], test_data.shape[1] - test_filter.shape[0] + 1),
dtype=np.float32)
for i in range(test_data.shape[0]):
comparison[i, ...] = np.correlate(test_data[i, :], test_filter)
my_output = corr1d.single_filter_multiple_data_correlate1D(test_data, test_filter)
assert my_output.dtype == np.float32
assert my_output.shape == comparison.shape
assert np.allclose(comparison, my_output, rtol=1e-3, atol=1e-2)
def test_single_filter_multidata_1D_B_double():
test_data = DATA[0, :, :1001].astype(np.float64)
test_filter = FILTERS[0, 35, :].astype(np.float64)
comparison = np.zeros((test_data.shape[0], test_data.shape[1] - test_filter.shape[0] + 1),
dtype=np.float64)
for i in range(test_data.shape[0]):
comparison[i, ...] = np.correlate(test_data[i, :], test_filter)
my_output = corr1d.single_filter_multiple_data_correlate1D(test_data, test_filter)
assert my_output.dtype == np.float64
assert my_output.shape == comparison.shape
assert np.allclose(comparison, my_output, rtol=1e-3, atol=1e-2)
def test_single_filter_multidata_1D_B_float():
test_data = DATA[0, :, :1001].astype(np.float32)
test_filter = FILTERS[0, 35, :].astype(np.float32)
comparison = np.zeros((test_data.shape[0], test_data.shape[1] - test_filter.shape[0] + 1),
dtype=np.float32)
for i in range(test_data.shape[0]):
comparison[i, ...] = np.correlate(test_data[i, :], test_filter)
my_output = corr1d.single_filter_multiple_data_correlate1D(test_data, test_filter)
assert my_output.dtype == np.float32
assert my_output.shape == comparison.shape
assert np.allclose(comparison, my_output, rtol=1e-3, atol=1e-2)
def test_single_data_multifilter_1D_A_double():
test_data = DATA[0, 35, :].astype(np.float64)
test_filter = FILTERS[0, :, :].astype(np.float64)
comparison = np.zeros((test_filter.shape[0], test_data.shape[0] - test_filter.shape[1] + 1),
dtype=np.float64)
for i in range(test_filter.shape[0]):
comparison[i, ...] = np.correlate(test_data, test_filter[i, :])
my_output = corr1d.multiple_filter_single_data_correlate1D(test_data, test_filter)
assert my_output.dtype == np.float64
assert my_output.shape == comparison.shape
assert np.allclose(comparison, my_output, rtol=1e-3, atol=1e-2)
def test_single_data_multifilter_1D_A_float():
test_data = DATA[0, 35, :].astype(np.float32)
test_filter = FILTERS[0, :, :].astype(np.float32)
comparison = np.zeros((test_filter.shape[0], test_data.shape[0] - test_filter.shape[1] + 1),
dtype=np.float32)
for i in range(test_filter.shape[0]):
comparison[i, ...] = np.correlate(test_data, test_filter[i, :])
my_output = corr1d.multiple_filter_single_data_correlate1D(test_data, test_filter)
assert my_output.dtype == np.float32
assert my_output.shape == comparison.shape
assert np.allclose(comparison, my_output, rtol=1e-3, atol=1e-2)
def test_single_data_multifilter_1D_B_double():
test_data = DATA[0, 54, 1337:9999].astype(np.float64)
test_filter = FILTERS[0, :, :].astype(np.float64)
comparison = np.zeros((test_filter.shape[0], test_data.shape[0] - test_filter.shape[1] + 1),
dtype=np.float64)
for i in range(test_filter.shape[0]):
comparison[i, ...] = np.correlate(test_data, test_filter[i, :])
my_output = corr1d.multiple_filter_single_data_correlate1D(test_data, test_filter)
assert my_output.dtype == np.float64
assert my_output.shape == comparison.shape
assert np.allclose(comparison, my_output, rtol=1e-3, atol=1e-2)
def test_single_data_multifilter_1D_B_float():
test_data = DATA[0, 54, 1337:9999].astype(np.float32)
test_filter = FILTERS[0, :, :].astype(np.float32)
comparison = np.zeros((test_filter.shape[0], test_data.shape[0] - test_filter.shape[1] + 1),
dtype=np.float32)
for i in range(test_filter.shape[0]):
comparison[i, ...] = np.correlate(test_data, test_filter[i, :])
my_output = corr1d.multiple_filter_single_data_correlate1D(test_data, test_filter)
assert my_output.dtype == np.float32
assert my_output.shape == comparison.shape
assert np.allclose(comparison, my_output, rtol=1e-3, atol=1e-2)
def test_multidata_multifilter_1D_A_double():
test_data = DATA[0, 13:26, :].astype(np.float64)
test_filter = FILTERS[0, 15:19, :].astype(np.float64)
comparison = np.zeros((test_data.shape[0], test_filter.shape[0], test_data.shape[1] - test_filter.shape[1] + 1),
dtype=np.float64)
for j in range(test_data.shape[0]):
for i in range(test_filter.shape[0]):
comparison[j, i, :] = np.correlate(test_data[j, :], test_filter[i, :])
my_output = corr1d.multiple_filter_multiple_data_correlate1D(test_data, test_filter)
assert my_output.dtype == np.float64
assert my_output.shape == comparison.shape
assert np.allclose(comparison, my_output, rtol=1e-3, atol=1e-2)
def test_multidata_multifilter_1D_A_float():
test_data = DATA[0, 13:26, :].astype(np.float32)
test_filter = FILTERS[0, 15:19, :].astype(np.float32)
comparison = np.zeros((test_data.shape[0], test_filter.shape[0], test_data.shape[1] - test_filter.shape[1] + 1),
dtype=np.float32)
for j in range(test_data.shape[0]):
for i in range(test_filter.shape[0]):
comparison[j, i, :] = np.correlate(test_data[j, :], test_filter[i, :])
my_output = corr1d.multiple_filter_multiple_data_correlate1D(test_data, test_filter)
assert my_output.dtype == np.float32
assert my_output.shape == comparison.shape
assert np.allclose(comparison, my_output, rtol=1e-3, atol=1e-2)
def test_single_data_single_filter_accum_A_double():
test_data = (DATA[0, :, :] / 10.0).astype(np.float64)
test_filter = (FILTERS[0, :, :] / 10.0).astype(np.float64)
buffer = np.zeros((test_data.shape[0], test_data.shape[1] - test_filter.shape[1] + 1),
dtype=np.float64)
for j in range(test_data.shape[0]):
buffer[j, :] = np.correlate(test_data[j, :], test_filter[j, :])
comparison = np.sum(buffer, axis=0)
my_output = corr1d.multichan_accum_correlate1D(test_data, test_filter)
assert my_output.dtype == np.float64
assert my_output.shape == comparison.shape
assert np.allclose(comparison, my_output, rtol=1e-3, atol=1e-2)
def test_single_data_single_filter_accum_A_float():
test_data = (DATA[0, :, :] / 10.0).astype(np.float32)
test_filter = (FILTERS[0, :, :] / 10.0).astype(np.float32)
buffer = np.zeros((test_data.shape[0], test_data.shape[1] - test_filter.shape[1] + 1),
dtype=np.float32)
for j in range(test_data.shape[0]):
buffer[j, :] = np.correlate(test_data[j, :], test_filter[j, :])
comparison = np.sum(buffer, axis=0)
my_output = corr1d.multichan_accum_correlate1D(test_data, test_filter)
assert my_output.dtype == np.float32
assert my_output.shape == comparison.shape
assert np.allclose(comparison, my_output, rtol=1e-3, atol=1e-2)
def test_single_data_multifilter_accum_A_double():
test_data = (DATA[0, :13, :] / 10.0).astype(np.float64)
test_filter = (FILTERS[:, :13, :] / 10.0).astype(np.float64)
buffer = np.zeros((test_filter.shape[0], test_data.shape[0], test_data.shape[1] - test_filter.shape[2] + 1),
dtype=np.float64)
for k in range(test_filter.shape[0]):
for j in range(test_data.shape[0]):
buffer[k, j, :] = np.correlate(test_data[j, :], test_filter[k, j, :])
comparison = np.sum(buffer, axis=1)
my_output = corr1d.batch_filter_multichan_accum_correlate1D(test_data, test_filter)
print(np.max(np.abs(comparison - my_output)))
assert my_output.dtype == np.float64
assert my_output.shape == comparison.shape
assert np.allclose(comparison, my_output, rtol=1e-3, atol=1e-2)
def test_single_data_multifilter_accum_A_float():
test_data = (DATA[0, :13, :] / 10.0).astype(np.float32)
test_filter = (FILTERS[:, :13, :] / 10.0).astype(np.float32)
buffer = np.zeros((test_filter.shape[0], test_data.shape[0], test_data.shape[1] - test_filter.shape[2] + 1),
dtype=np.float32)
for k in range(test_filter.shape[0]):
for j in range(test_data.shape[0]):
buffer[k, j, :] = np.correlate(test_data[j, :], test_filter[k, j, :])
comparison = np.sum(buffer, axis=1)
my_output = corr1d.batch_filter_multichan_accum_correlate1D(test_data, test_filter)
print(np.max(np.abs(comparison - my_output)))
assert my_output.dtype == np.float32
assert my_output.shape == comparison.shape
assert np.allclose(comparison, my_output, rtol=1e-3, atol=1e-2)
def test_multidata_single_filter_accum_A_double():
test_data = (DATA[:, :13, :] / 10.0).astype(np.float64)
test_filter = (FILTERS[0, :13, :] / 10.0).astype(np.float64)
buffer = np.zeros((test_data.shape[0], test_data.shape[1], test_data.shape[2] - test_filter.shape[1] + 1),
dtype=np.float64)
for k in range(test_data.shape[0]):
for j in range(test_data.shape[1]):
buffer[k, j, :] = np.correlate(test_data[k, j, :], test_filter[j, :])
comparison = np.sum(buffer, axis=1)
my_output = corr1d.batch_data_multichan_accum_correlate1D(test_data, test_filter)
print(np.max(np.abs(comparison - my_output)))
assert my_output.dtype == np.float64
assert my_output.shape == comparison.shape
assert np.allclose(comparison, my_output, rtol=1e-2, atol=1e-2)
def test_multidata_single_filter_accum_A_float():
test_data = (DATA[:, :13, :] / 10.0).astype(np.float32)
test_filter = (FILTERS[0, :13, :] / 10.0).astype(np.float32)
buffer = np.zeros((test_data.shape[0], test_data.shape[1], test_data.shape[2] - test_filter.shape[1] + 1),
dtype=np.float32)
for k in range(test_data.shape[0]):
for j in range(test_data.shape[1]):
buffer[k, j, :] = np.correlate(test_data[k, j, :], test_filter[j, :])
comparison = np.sum(buffer, axis=1)
my_output = corr1d.batch_data_multichan_accum_correlate1D(test_data, test_filter)
print(np.max(np.abs(comparison - my_output)))
assert my_output.dtype == np.float32
assert my_output.shape == comparison.shape
assert np.allclose(comparison, my_output, rtol=1e-2, atol=1e-2)
def test_multidata_multifilter_accum_A_double():
test_data = (DATA[:, :13, :] / 10.0).astype(np.float64)
test_filter = (FILTERS[:, :13, :] / 10.0).astype(np.float64)
buffer = np.zeros((test_data.shape[0], test_filter.shape[0],
test_data.shape[1], test_data.shape[2] - test_filter.shape[2] + 1),
dtype=np.float64)
for k in range(test_data.shape[0]):
for l in range(test_filter.shape[0]):
for j in range(test_data.shape[1]):
buffer[k, l, j, :] = np.correlate(test_data[k, j, :], test_filter[l, j, :])
comparison = np.sum(buffer, axis=2)
my_output = corr1d.batch_data_batch_filter_multichan_accum_correlate1D(test_data, test_filter)
print(np.max(np.abs(comparison - my_output)))
assert my_output.dtype == np.float64
assert my_output.shape == comparison.shape
assert np.allclose(comparison, my_output, rtol=1e-2, atol=1e-2)
def test_multidata_multifilter_accum_A_float():
test_data = (DATA[:, :13, :] / 10.0).astype(np.float32)
test_filter = (FILTERS[:, :13, :] / 10.0).astype(np.float32)
buffer = np.zeros((test_data.shape[0], test_filter.shape[0],
test_data.shape[1], test_data.shape[2] - test_filter.shape[2] + 1),
dtype=np.float32)
for k in range(test_data.shape[0]):
for l in range(test_filter.shape[0]):
for j in range(test_data.shape[1]):
buffer[k, l, j, :] = np.correlate(test_data[k, j, :], test_filter[l, j, :])
comparison = np.sum(buffer, axis=2)
my_output = corr1d.batch_data_batch_filter_multichan_accum_correlate1D(test_data, test_filter)
print(np.max(np.abs(comparison - my_output)))
assert my_output.dtype == np.float32
assert my_output.shape == comparison.shape
assert np.allclose(comparison, my_output, rtol=1e-2, atol=1e-2)
def test__single_filter_single_data_channel_correlate1D():
N_CH = 13
test_data = (DATA[0, :N_CH, :] / 10.0).astype(np.float64)
test_filter = (FILTERS[0, :N_CH, :] / 10.0).astype(np.float64)
buffer = np.zeros((test_data.shape[0], test_data.shape[1] - test_filter.shape[1] + 1),
dtype=np.float64)
for k in range(N_CH):
buffer[k, :] = np.correlate(test_data[k, :], test_filter[k, :])
my_output = corr1d.single_filter_single_data_channel_correlate1D(test_data,
test_filter)
print(np.max(np.abs(buffer - my_output)))
assert my_output.dtype == np.float64
assert my_output.shape == buffer.shape
assert np.allclose(buffer, my_output, rtol=1e-2, atol=1e-2)
def test__single_filter_single_data_channel_correlate1D__float():
N_CH = 13
test_data = (DATA[0, :N_CH, :] / 10.0).astype(np.float32)
test_filter = (FILTERS[0, :N_CH, :] / 10.0).astype(np.float32)
buffer = np.zeros((test_data.shape[0], test_data.shape[1] - test_filter.shape[1] + 1),
dtype=np.float32)
for k in range(N_CH):
buffer[k, :] = np.correlate(test_data[k, :], test_filter[k, :])
my_output = corr1d.single_filter_single_data_channel_correlate1D(test_data,
test_filter)
print(np.max(np.abs(buffer - my_output)))
assert my_output.dtype == np.float32
assert my_output.shape == buffer.shape
assert np.allclose(buffer, my_output, rtol=1e-2, atol=1e-2)
def test__single_filter_batch_data_channel_correlate1D():
N_CH = 13
test_data = (DATA[:, :N_CH, :] / 10.0).astype(np.float64)
test_filter = (FILTERS[0, :N_CH, :] / 10.0).astype(np.float64)
buffer = np.zeros((test_data.shape[0], test_data.shape[1], test_data.shape[2] - test_filter.shape[1] + 1),
dtype=np.float64)
for i in range(test_data.shape[0]):
for k in range(test_data.shape[1]):
buffer[i, k, :] = np.correlate(test_data[i, k, :], test_filter[k, :])
my_output = corr1d.single_filter_batch_data_channel_correlate1D(test_data,
test_filter)
print(np.max(np.abs(buffer - my_output)))
assert my_output.dtype == np.float64
assert my_output.shape == buffer.shape
assert np.allclose(buffer, my_output, rtol=1e-2, atol=1e-2)
def test__single_filter_batch_data_channel_correlate1D__float():
N_CH = 13
test_data = (DATA[:, :N_CH, :] / 10.0).astype(np.float32)
test_filter = (FILTERS[0, :N_CH, :] / 10.0).astype(np.float32)
buffer = np.zeros((test_data.shape[0], test_data.shape[1], test_data.shape[2] - test_filter.shape[1] + 1),
dtype=np.float32)
for i in range(test_data.shape[0]):
for k in range(test_data.shape[1]):
buffer[i, k, :] = np.correlate(test_data[i, k, :], test_filter[k, :])
my_output = corr1d.single_filter_batch_data_channel_correlate1D(test_data,
test_filter)
print(np.max(np.abs(buffer - my_output)))
assert my_output.dtype == np.float32
assert my_output.shape == buffer.shape
assert np.allclose(buffer, my_output, rtol=1e-2, atol=1e-2)
def test__batch_filter_single_data_channel_correlate1D():
N_CH = 13
test_data = (DATA[0, :N_CH, :] / 10.0).astype(np.float64)
test_filter = (FILTERS[:, :N_CH, :] / 10.0).astype(np.float64)
buffer = np.zeros((test_filter.shape[0], test_filter.shape[1], test_data.shape[1] - test_filter.shape[2] + 1),
dtype=np.float64)
for i in range(test_filter.shape[0]):
for k in range(N_CH):
buffer[i, k, :] = np.correlate(test_data[k, :], test_filter[i, k, :])
my_output = corr1d.batch_filter_single_data_channel_correlate1D(test_data,
test_filter)
print(np.max(np.abs(buffer - my_output)))
assert my_output.dtype == np.float64
assert my_output.shape == buffer.shape
assert np.allclose(buffer, my_output, rtol=1e-2, atol=1e-2)
def test__batch_filter_single_data_channel_correlate1D__float():
N_CH = 13
test_data = (DATA[0, :N_CH, :] / 10.0).astype(np.float32)
test_filter = (FILTERS[:, :N_CH, :] / 10.0).astype(np.float32)
buffer = np.zeros((test_filter.shape[0], test_filter.shape[1], test_data.shape[1] - test_filter.shape[2] + 1),
dtype=np.float32)
for i in range(test_filter.shape[0]):
for k in range(N_CH):
buffer[i, k, :] = np.correlate(test_data[k, :], test_filter[i, k, :])
my_output = corr1d.batch_filter_single_data_channel_correlate1D(test_data,
test_filter)
    print(np.max(np.abs(buffer - my_output)))
    assert my_output.dtype == np.float32
    assert my_output.shape == buffer.shape
    assert np.allclose(buffer, my_output, rtol=1e-2, atol=1e-2)
# coding: utf-8
# In[1]:
# Package imports
import numpy as np
import matplotlib.pyplot as plt
from testCases import *
import sklearn
import sklearn.datasets
import sklearn.linear_model
from planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets
get_ipython().run_line_magic('matplotlib', 'inline')
np.random.seed(1) # set a seed so that the results are consistent
# ## Neural Network model
#
# You are going to train a Neural Network with a single hidden layer.
#
# **Here is our model**:
# <img src="images/classification_kiank.png" style="width:600px;height:300px;">
#
# **Mathematically**:
#
# For one example $x^{(i)}$:
# $$z^{[1] (i)} = W^{[1]} x^{(i)} + b^{[1] (i)}\tag{1}$$
# $$a^{[1] (i)} = \tanh(z^{[1] (i)})\tag{2}$$
# $$z^{[2] (i)} = W^{[2]} a^{[1] (i)} + b^{[2] (i)}\tag{3}$$
# $$\hat{y}^{(i)} = a^{[2] (i)} = \sigma(z^{ [2] (i)})\tag{4}$$
# $$y^{(i)}_{prediction} = \begin{cases} 1 & \mbox{if } a^{[2](i)} > 0.5 \\ 0 & \mbox{otherwise } \end{cases}\tag{5}$$
#
# Given the predictions on all the examples, you can also compute the cost $J$ as follows:
# $$J = - \frac{1}{m} \sum\limits_{i = 0}^{m} \large\left(\small y^{(i)}\log\left(a^{[2] (i)}\right) + (1-y^{(i)})\log\left(1- a^{[2] (i)}\right) \large \right) \small \tag{6}$$
#
# **Reminder**: The general methodology to build a Neural Network is to:
# 1. Define the neural network structure ( # of input units, # of hidden units, etc).
# 2. Initialize the model's parameters
# 3. Loop:
# - Implement forward propagation
# - Compute loss
# - Implement backward propagation to get the gradients
# - Update parameters (gradient descent)
#
# You often build helper functions to compute steps 1-3 and then merge them into one function we call `nn_model()`. Once you've built `nn_model()` and learnt the right parameters, you can make predictions on new data.
# ### Defining the neural network structure ####
#
# **Exercise**: Define three variables:
# - n_x: the size of the input layer
# - n_h: the size of the hidden layer (set this to 4)
# - n_y: the size of the output layer
#
# **Hint**: Use shapes of X and Y to find n_x and n_y. Also, hard code the hidden layer size to be 4.
# In[2]:
def layer_sizes(X, Y):
"""
Arguments:
X -- input dataset of shape (input size, number of examples)
Y -- labels of shape (output size, number of examples)
Returns:
n_x -- the size of the input layer
n_h -- the size of the hidden layer
n_y -- the size of the output layer
"""
n_x = X.shape[0]
n_h = 4
n_y = Y.shape[0]
return (n_x, n_h, n_y)
# In[3]:
X_assess, Y_assess = layer_sizes_test_case()
(n_x, n_h, n_y) = layer_sizes(X_assess, Y_assess)
if __name__=='__main__':
print("The size of the input layer is: n_x = " + str(n_x))
print("The size of the hidden layer is: n_h = " + str(n_h))
print("The size of the output layer is: n_y = " + str(n_y))
# **Expected Output** (these are not the sizes you will use for your network, they are just used to assess the function you've just coded).
#
# <table style="width:20%">
# <tr>
# <td>**n_x**</td>
# <td> 5 </td>
# </tr>
#
# <tr>
# <td>**n_h**</td>
# <td> 4 </td>
# </tr>
#
# <tr>
# <td>**n_y**</td>
# <td> 2 </td>
# </tr>
#
# </table>
# ### Initialize the model's parameters ####
#
# **Exercise**: Implement the function `initialize_parameters()`.
#
# **Instructions**:
# - Make sure your parameters' sizes are right. Refer to the neural network figure above if needed.
# - You will initialize the weights matrices with random values.
# - Use: `np.random.randn(a,b) * 0.01` to randomly initialize a matrix of shape (a,b).
# - You will initialize the bias vectors as zeros.
# - Use: `np.zeros((a,b))` to initialize a matrix of shape (a,b) with zeros.
# In[4]:
def initialize_parameters(n_x, n_h, n_y):
"""
Argument:
n_x -- size of the input layer
n_h -- size of the hidden layer
n_y -- size of the output layer
Returns:
params -- python dictionary containing your parameters:
W1 -- weight matrix of shape (n_h, n_x)
b1 -- bias vector of shape (n_h, 1)
W2 -- weight matrix of shape (n_y, n_h)
b2 -- bias vector of shape (n_y, 1)
"""
np.random.seed(2)
W1 = np.random.randn(n_h, n_x)*0.01
b1 = np.zeros(shape=(n_h,1))
W2 = np.random.randn(n_y, n_h)*0.01
b2 = np.zeros(shape=(n_y,1))
assert (W1.shape == (n_h, n_x))
assert (b1.shape == (n_h, 1))
assert (W2.shape == (n_y, n_h))
assert (b2.shape == (n_y, 1))
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
# In[5]:
n_x, n_h, n_y = initialize_parameters_test_case()
parameters = initialize_parameters(n_x, n_h, n_y)
if __name__=='__main__':
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# **Expected Output**:
#
# <table style="width:90%">
# <tr>
# <td>**W1**</td>
# <td> [[-0.00416758 -0.00056267]
# [-0.02136196 0.01640271]
# [-0.01793436 -0.00841747]
# [ 0.00502881 -0.01245288]] </td>
# </tr>
#
# <tr>
# <td>**b1**</td>
# <td> [[ 0.]
# [ 0.]
# [ 0.]
# [ 0.]] </td>
# </tr>
#
# <tr>
# <td>**W2**</td>
# <td> [[-0.01057952 -0.00909008 0.00551454 0.02292208]]</td>
# </tr>
#
#
# <tr>
# <td>**b2**</td>
# <td> [[ 0.]] </td>
# </tr>
#
# </table>
#
#
# ### The Loop ####
#
# **Question**: Implement `forward_propagation()`.
#
# **Instructions**:
# - Look above at the mathematical representation of your classifier.
# - You can use the function `sigmoid()`. It is built-in (imported) in the notebook.
# - You can use the function `np.tanh()`. It is part of the numpy library.
# - The steps you have to implement are:
# 1. Retrieve each parameter from the dictionary "parameters" (which is the output of `initialize_parameters()`) by using `parameters[".."]`.
# 2. Implement Forward Propagation. Compute $Z^{[1]}, A^{[1]}, Z^{[2]}$ and $A^{[2]}$ (the vector of all your predictions on all the examples in the training set).
# - Values needed in the backpropagation are stored in "`cache`". The `cache` will be given as an input to the backpropagation function.
# In[6]:
def forward_propagation(X, parameters):
"""
Argument:
X -- input data of size (n_x, m)
parameters -- python dictionary containing your parameters (output of initialization function)
Returns:
A2 -- The sigmoid output of the second activation
cache -- a dictionary containing "Z1", "A1", "Z2" and "A2"
"""
# Retrieve each parameter from the dictionary "parameters"
W1 = parameters['W1']
b1 = parameters['b1']
W2 = parameters['W2']
b2 = parameters['b2']
# Implement Forward Propagation to calculate A2 (probabilities)
Z1 = np.matmul(W1,X) + b1
A1 = np.tanh(Z1)
    Z2 = np.matmul(W2, A1) + b2
    A2 = sigmoid(Z2)
    assert(A2.shape == (1, X.shape[1]))
    cache = {"Z1": Z1,
             "A1": A1,
             "Z2": Z2,
             "A2": A2}
    return A2, cache
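# In[7]:

# A minimal sketch of the cross-entropy cost from equation (6) above (this
# cell is an illustration, not part of the original notebook; `nn_model()`
# would use a function like this to track training progress):
def compute_cost(A2, Y):
    """
    Computes the cross-entropy cost given in equation (6).

    Arguments:
    A2 -- sigmoid output of the second activation, of shape (1, number of examples)
    Y -- "true" labels vector of shape (1, number of examples)

    Returns:
    cost -- cross-entropy cost given equation (6)
    """
    m = Y.shape[1]  # number of examples
    logprobs = np.multiply(np.log(A2), Y) + np.multiply(np.log(1 - A2), 1 - Y)
    cost = -np.sum(logprobs) / m
    cost = float(np.squeeze(cost))  # make sure cost is a real scalar
    return cost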
import logging
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import scipy
from scipy import stats
import datetime
import networkx as nx
import matplotlib as mpl
import sys
import tqdm
from matplotlib import animation, rc
from matplotlib.ticker import MaxNLocator
from IPython.display import clear_output, HTML, Image
from neurolib.utils import atlases
import neurolib.utils.paths as paths
import neurolib.utils.functions as func
class Brainplot:
def __init__(self, Cmat, data, nframes=None, dt=0.1, fps=25, labels=False, darkmode=True):
self.sc = Cmat
self.n = self.sc.shape[0]
self.data = data
self.darkmode = darkmode
self.G = nx.Graph()
self.G.add_nodes_from(range(self.n))
coords = {}
atlas = atlases.AutomatedAnatomicalParcellation2()
for i, c in enumerate(atlas.coords()):
coords[i] = [c[0], c[1]]
self.position = coords
self.edge_threshold = 0.01
self.fps = fps
self.dt = dt
        nframes = nframes or int((data.shape[1] * self.dt / 1000) * self.fps)  # default: self.fps frames per second of data
        logging.info(f"Defaulting to {nframes} frames at {self.fps} fps")
self.nframes = nframes
self.frame_interval = self.data.shape[1] // self.nframes
self.interval = int(self.frame_interval * self.dt)
self.draw_labels = labels
for t in range(self.n):
# print t
for s in range(t):
# print( n, t, s)
if self.sc[t, s] > self.edge_threshold:
# print( 'edge', t, s, self.sc[t,s])
self.G.add_edge(t, s)
# node color map
self.cmap = plt.get_cmap("plasma") # mpl.cm.cool
# default style
self.imagealpha = 0.5
self.edgecolor = "k"
self.edgealpha = 0.8
self.edgeweight = 1.0
self.nodesize = 50
self.nodealpha = 0.8
self.vmin = 0
self.vmax = 50
self.lw = 0.5
if self.darkmode:
plt.style.use("dark")
# let's choose a cyberpunk style for the dark theme
self.edgecolor = "#37f522"
self.edgeweight = 0.5
self.edgealpha = 0.6
self.nodesize = 40
self.nodealpha = 0.8
self.vmin = 0
self.vmax = 30
self.cmap = plt.get_cmap("cool") # mpl.cm.cool
self.imagealpha = 0.5
self.lw = 1
fname = os.path.join("neurolib", "data", "resources", "clean_brain_white.png")
else:
# plt.style.use("light")
fname = os.path.join("neurolib", "data", "resources", "clean_brain.png")
self.imgTopView = mpl.image.imread(fname)
self.pbar = tqdm.tqdm(total=self.nframes)
def update(self, i, ax, ax_rates=None, node_color=None, node_size=None, node_alpha=None, clear=True):
frame = int(i * self.frame_interval)
node_color = node_color or self.data[:, frame]
node_size = node_size or self.nodesize
node_alpha = node_alpha or self.nodealpha
if clear:
ax.cla()
im = ax.imshow(self.imgTopView, alpha=self.imagealpha, origin="upper", extent=[40, 202, 28, 240])
ns = nx.draw_networkx_nodes(
self.G,
pos=self.position,
node_color=node_color,
cmap=self.cmap,
vmin=self.vmin,
vmax=self.vmax,
node_size=node_size,
alpha=node_alpha,
ax=ax,
edgecolors="k",
)
es = nx.draw_networkx_edges(
self.G, pos=self.position, alpha=self.edgealpha, edge_color=self.edgecolor, ax=ax, width=self.edgeweight
)
labels = {}
for ni in range(self.n):
labels[ni] = str(ni)
if self.draw_labels:
nx.draw_networkx_labels(self.G, self.position, labels, font_size=8)
ax.set_axis_off()
ax.set_xlim(20, 222)
ax.set_ylim(25, 245)
# timeseries
if ax_rates:
ax_rates.cla()
ax_rates.set_xticks([])
ax_rates.set_yticks([])
ax_rates.set_ylabel("Brain activity", fontsize=8)
t = np.linspace(0, frame * self.dt, frame)
ax_rates.plot(t, np.mean(self.data[:, :frame], axis=0).T, lw=self.lw)
t_total = self.data.shape[1] * self.dt
ax_rates.set_xlim(0, t_total)
self.pbar.update(1)
plt.tight_layout()
if clear:
clear_output(wait=True)
def plot_rates(model):
plt.figure(figsize=(4, 1))
plt_until = 10 * 1000
plt.plot(model.t[model.t < plt_until], model.output[:, model.t < plt_until].T, lw=0.5)
def plot_brain(
model, ds, color=None, size=None, title=None, cbar=True, cmap="RdBu", clim=None, cbarticks=None, cbarticklabels=None
):
"""Dump and easy wrapper around the brain plotting function.
:param color: colors of nodes, defaults to None
:type color: numpy.ndarray, optional
:param size: size of the nodes, defaults to None
:type size: numpy.ndarray, optional
:raises ValueError: Raises error if node size is too big.
"""
s = Brainplot(ds.Cmat, model.output, fps=10, darkmode=False)
s.cmap = plt.get_cmap(cmap)
dpi = 300
fig = plt.figure(dpi=dpi)
ax = plt.gca()
if title:
ax.set_title(title, fontsize=26)
if clim is None:
s.vmin, s.vmax = np.min(color), np.max(color)
else:
s.vmin, s.vmax = clim[0], clim[1]
if size is not None:
node_size = size
else:
# some weird scaling of the color to a size
def norm(what):
what = what.copy()
what -= np.min(what)
what /= np.max(what)
return what
node_size = list(np.exp((norm(color) + 2) * 2))
if isinstance(color, np.ndarray):
color = list(color)
if isinstance(node_size, np.ndarray):
node_size = list(node_size)
if np.max(node_size) > 2000:
raise ValueError(f"node_size too big: {np.max(node_size)}")
s.update(0, ax, node_color=color, node_size=node_size, clear=False)
if cbar:
cbaxes = fig.add_axes([0.68, 0.1, 0.015, 0.7])
sm = plt.cm.ScalarMappable(cmap=s.cmap, norm=plt.Normalize(vmin=s.vmin, vmax=s.vmax))
cbar = plt.colorbar(sm, cbaxes, ticks=cbarticks)
cbar.ax.tick_params(labelsize=16)
if cbarticklabels:
cbar.ax.set_yticklabels(cbarticklabels)
# other plotting
def plot_average_timeseries(model, xticks=False, kwargs={}, xlim=None, figsize=(8, 1)):
# print("{} Peaks found ({} p/s)".format(len(peaks), len(peaks)/(model.params['duration']/1000)))
plt.figure(figsize=figsize, dpi=300)
# cut rates if xlim is given
if xlim:
rates = model.output[:, (model.t / 1000 > xlim[0]) & (model.t / 1000 < xlim[1])]
t = model.t[(model.t / 1000 > xlim[0]) & (model.t / 1000 < xlim[1])]
print(rates.shape)
else:
rates = model.output
t = model.t
plt.plot(t / 1000, np.mean(rates, axis=0), **kwargs)
# plt.plot(model.t/1000, signal, lw=2, label = 'smoothed')
plt.autoscale(enable=True, axis="x", tight=True)
# for p in peaks:
# plt.vlines(p*params['dt']*10, 0, 0.008, color='b', lw=2, alpha=0.6)
# plt.xlim(0, 10000)
# plt.plot(model.t/1000, states*10, lw=2, c='C1', alpha=0.4, label='detected state')
# plt.xlabel("Time [s]")
if not xticks:
plt.xticks([])
if xlim:
plt.xlim(*xlim)
plt.ylabel("Rate [Hz]")
# plt.legend(loc=1, fontsize=8)
# import matplotlib as mpl
# mpl.rcParams['axes.spines.left'] = False
# mpl.rcParams['axes.spines.right'] = False
# mpl.rcParams['axes.spines.top'] = False
# mpl.rcParams['axes.spines.bottom'] = False
plt.gca().spines["right"].set_visible(False)
plt.gca().spines["top"].set_visible(False)
plt.gca().spines["left"].set_visible(False)
plt.gca().spines["bottom"].set_visible(False)
plt.tight_layout()
set_axis_size(*figsize)
def detectSWStates(output, threshold=1):
states = np.zeros(output.shape)
# super_threshold_indices = rates > threshold
# works with 1D thresholds, per node
super_threshold_indices = np.greater(output.T, threshold).T
states[super_threshold_indices] = 1
return states
def filter_long_states(model, states):
states = states.copy()
LENGTH_THRESHOLD = int(50 / model.params["dt"]) # ms
for s in states:
lengths = get_state_lengths(s)
current_idx = 0
last_long_state = lengths[0][0] # first state
for state, length in lengths:
if length >= LENGTH_THRESHOLD:
last_long_state = state
else:
s[current_idx : current_idx + length] = last_long_state
current_idx += length
return states
def detectSWs(model, up_thresh=0.01, detectSWStates_kw=None, filter_long=True):
threshold_per_node = up_thresh * np.max(model.output, axis=1) # as per Nghiem et al., 2020, Renart et al., 2010
states = np.zeros(model.output.shape)
if detectSWStates_kw:
logging.warning(f"Warning: detectSWStates_kw is not implemented anymore, using up_thresh = {up_thresh}")
states = detectSWStates(model.output, threshold=threshold_per_node)
# for i, o in enumerate(model.output):
# smoothed = o # scipy.ndimage.gaussian_filter(o, sigma=500)
# s = detectSWStates(model.t, smoothed, **detectSWStates_kw)
# states[i, :] = s
if filter_long:
states = filter_long_states(model, states)
return states
def get_involvement(states, down=True):
"""Returns involvement in up- and down-states.
:param states: state array (NxT)
:type states: np.ndarray
:return: Involvement time series (1xT)
:rtype: np.ndarray
"""
up_involvement = np.sum(states, axis=0) / states.shape[0]
if down:
return 1 - up_involvement
else:
return up_involvement
def plot_states_timeseries(model, states, title=None, labels=True, cmap="plasma"):
figsize = (8, 2)
plt.figure(figsize=figsize)
plt.imshow(
states,
extent=[0, states.shape[1] * model.params.dt / 1000, 0, states.shape[0]],
aspect="auto",
cmap=plt.cm.get_cmap(cmap, 2),
)
if labels:
plt.xlabel("Time [s]")
plt.ylabel("Node")
else:
plt.xticks([])
plt.yticks([])
if title:
plt.title(title)
plt.autoscale(enable=True, axis="x", tight=True)
if labels:
cbar = plt.colorbar(pad=0.02)
cbar.set_ticks([0.25, 0.75])
cbar.ax.set_yticklabels(["Down", "Up"], rotation=90, va="center")
cbar.ax.tick_params(width=0, labelsize=14)
plt.tight_layout()
set_axis_size(*figsize)
def plot_involvement_timeseries(model, involvement):
fig, axs = plt.subplots(1, 2, figsize=(10, 4), gridspec_kw={"width_ratios": [4, 1]})
axs[0].set_title("Involvement of brain areas in SO events")
axs[0].plot(model.t / 1000, involvement * 100, c="C0")
axs[0].set_ylabel("Involvement [%]")
axs[0].set_xlabel("Time [s]")
axs[0].set_ylim([0, 100])
axs[0].set_aspect("auto")
axs[1].hist(involvement * 100, bins=10, orientation="horizontal", density=True, rwidth=0.8, edgecolor="k")
axs[1].set_yticks([])
axs[1].set_xlabel("KDE")
axs[1].set_ylim([0, 100])
plt.tight_layout()
def plot_degree_duration_scatterplot(model, states, ds, lingres=False, color_down="C0", color_up="C1"):
    figsize = (3, 3)
    area_downtimes = np.sum(states == 0, axis=1) / model.output.shape[1] * 100
    area_uptimes = np.sum(states == 1, axis=1) / model.output.shape[1] * 100
    fig, ax = plt.subplots(1, 1, figsize=figsize)
degrees = [(np.sum(ds.Cmat, axis=1))[i] / np.max(np.sum(ds.Cmat, axis=1)) for i in range(ds.Cmat.shape[0])]
ax.scatter(
degrees,
area_uptimes,
s=14,
c=color_up,
edgecolor="black",
linewidth=0.5,
label="up-state",
)
ax.scatter(
degrees,
area_downtimes,
s=14,
c=color_down,
edgecolor="black",
linewidth=0.5,
label="down-state",
)
if lingres:
plot_linregress(degrees, area_downtimes, kwargs={"c": color_down, "zorder": -2})
if lingres:
plot_linregress(degrees, area_uptimes, kwargs={"c": color_up, "zorder": -2})
plt.legend(fontsize=12, frameon=False, markerscale=1.8, handletextpad=-0.5)
ax.set_xlabel("Node degree")
ax.set_ylabel("Time spent [%]")
plt.tight_layout()
set_axis_size(*figsize)
def plot_transition_phases(node_mean_phases_down, node_mean_phases_up, atlas):
def hide_axis(ax):
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
names = []
phases_down = []
phases_up = []
ipsilateral_mean_down_phase = [
(i + k) / 2 for i, k in zip(node_mean_phases_down[0::2], node_mean_phases_down[1::2])
]
ipsilateral_mean_up_phase = [(i + k) / 2 for i, k in zip(node_mean_phases_up[0::2], node_mean_phases_up[1::2])]
# ipsilateral_names = [i[:-2] for i, k in zip(atlas.names()[0::2], atlas.names()[1::2])]
ipsilateral_names = [
f"{i[:-2]} ({nr*2}, {nr*2+1})" for nr, (i, k) in enumerate(zip(atlas.names()[0::2], atlas.names()[1::2]))
]
# clean up names
for i in range(len(ipsilateral_names)):
ipsilateral_names[i] = ipsilateral_names[i].replace("_", " ")
for i, ipsi_region in enumerate(np.argsort(ipsilateral_mean_down_phase)):
# print(i, region, node_mean_phases_down[region], atlas.names()[region], atlas.coords()[region][2])
# y_coord = (i%80)/30
names.append(ipsilateral_names[ipsi_region])
phases_down.append(ipsilateral_mean_down_phase[ipsi_region])
phases_up.append(ipsilateral_mean_up_phase[ipsi_region])
names = [n.replace("_", " ") for n in names]
fig, ax = plt.subplots(1, 1, figsize=(6, 3))
# left=-np.asarray(phases[::-1]),
ax.bar(names, phases_up, lw=1, ls="--", color="C0", label="up-transition")
ax.bar(names, phases_down, lw=1, ls="--", color="C1", label="down-transition")
# ax.tick_params(axis="both", which="major", labelsize=8)
# ax.yaxis.set_tick_params(width=1)
ax.autoscale(enable=True, axis="x", tight=True)
ax.legend(fontsize=8, loc=1, bbox_to_anchor=(0.98, 1.0))
ax.set_ylabel("Phase of transition", fontsize=12)
# ax.text(0.2, 80, "Up-state", fontsize=10)
# ax.text(-0.2, 80, "Down-state", fontsize=10, ha="right")
# ax.set_title("Mean transition phase")
hide_axis(ax)
plt.grid(alpha=0.5, lw=0.5)
ax.tick_params(axis="y", labelsize=10)
ax.tick_params(axis="x", which="major", labelsize=7, rotation=90)
plt.tight_layout(pad=0)
def plot_transition_phases_against_eachother(node_mean_phases_down, node_mean_phases_up):
figsize = (2, 2)
figsize = np.multiply(figsize, 0.75)
plt.figure(figsize=figsize)
plt.scatter(
node_mean_phases_down,
node_mean_phases_up,
s=8,
edgecolor="black",
linewidth=0.5,
c="lightgray",
)
plot_linregress(node_mean_phases_down, node_mean_phases_up, kwargs={"c": "k", "alpha": 0.5, "zorder": -2})
plt.xlabel("Mean down-phase")
plt.ylabel("Up-phase")
plt.tight_layout(pad=0)
set_axis_size(*figsize)
def plot_state_brain_areas(model, states, atlas, ds, color_down="C0", color_up="C1"):
fig, axs = plt.subplots(1, 1, figsize=(6, 3))
# axs.set_title("Time spent in up- and down-state per brain area", fontsize=12)
area_downtimes = np.sum(states == 0, axis=1) / model.output.shape[1] * 100
area_uptimes = np.sum(states == 1, axis=1) / model.output.shape[1] * 100
ipsilateral_downtimes = [(i + k) / 2 for i, k in zip(area_downtimes[0::2], area_downtimes[1::2])]
ipsilateral_uptimes = [(i + k) / 2 for i, k in zip(area_uptimes[0::2], area_uptimes[1::2])]
ipsilateral_uptimes_diff = [(i - k) for i, k in zip(area_uptimes[0::2], area_uptimes[1::2])]
ipsilateral_names = [
f"{i[:-2]} ({nr*2}, {nr*2+1})" for nr, (i, k) in enumerate(zip(atlas.names()[0::2], atlas.names()[1::2]))
]
# clean up names
for i in range(len(ipsilateral_names)):
ipsilateral_names[i] = ipsilateral_names[i].replace("_", " ")
axs.bar(
ipsilateral_names,
ipsilateral_uptimes,
bottom=ipsilateral_downtimes,
edgecolor="k",
color="C0",
linewidth="0.2",
zorder=-10,
label="up-state",
)
axs.bar(ipsilateral_names, ipsilateral_downtimes, edgecolor="k", color="C1", linewidth="0.2", label="down-state")
axs.legend(fontsize=8, loc=1, bbox_to_anchor=(0.98, 0.9))
axs.autoscale(enable=True, axis="x", tight=True)
axs.spines["right"].set_visible(False)
axs.spines["top"].set_visible(False)
axs.spines["left"].set_visible(False)
axs.spines["bottom"].set_visible(False)
axs.tick_params(axis="x", which="major", labelsize=7, rotation=90)
# axs[0].set_xlabel("Brain area")
axs.set_ylabel("Time spent [%]", fontsize=12)
axs.set_yticks([0, 25, 50, 75, 100])
axs.tick_params(axis="y", labelsize=10)
# for i in range(ds.Cmat.shape[0]):
# degree = (np.sum(ds.Cmat, axis=1))[i] / np.max(np.sum(ds.Cmat, axis=1))
# axs[1].scatter(degree, area_downtimes[i], s=5, c="C1", edgecolor="black", linewidth="0.2")
# axs[1].scatter(degree, area_uptimes[i], s=5, c="C0", edgecolor="black", linewidth="0.2")
# axs[1].set_xlabel("Node degree")
# axs[1].set_xticks([0, 1, 2, 3])
# axs[1].set_yticklabels([])
plt.tight_layout(pad=0)
return area_downtimes, area_uptimes
def plot_involvement_durations(model, states, involvement, nbins=6, legend=False):
invs = {0: [], 1: []}
lens = {0: [], 1: []}
for s in states[:]:
lengths = get_state_lengths(s)
current_idx = 0
for state, length in lengths:
state = int(state)
# compuite average involvement during this state
mean_involvement = np.mean(involvement[current_idx : current_idx + length])
lens[state].append(length * model.params.dt) # convert to seconds
invs[state].append(mean_involvement)
current_idx += length
from scipy.stats import binned_statistic
figsize = (3, 2)
figsize = np.multiply(figsize, 0.75)
plt.figure(figsize=figsize)
up_bin_means, bin_edges, _ = binned_statistic(invs[1], lens[1], bins=nbins, range=(0, 1))
plt.bar(bin_edges[:-1], up_bin_means[::-1], width=0.04, edgecolor="k", color="C0", label="up-state")
down_bin_means, bin_edges, _ = binned_statistic(invs[0], lens[0], bins=nbins, range=(0, 1))
plt.bar(bin_edges[:-1] + 0.05, down_bin_means, width=0.04, edgecolor="k", color="C1", label="down-state")
if legend:
plt.legend(fontsize=8)
plt.xticks([0, 0.5, 1], [0, 50, 100])
axs = plt.gca()
axs.spines["right"].set_visible(False)
axs.spines["top"].set_visible(False)
axs.spines["left"].set_visible(False)
plt.gca().tick_params(axis="both", direction="out", which="major", bottom=True, left=True)
plt.xlabel("Involvement [%]")
plt.ylabel("Duration [ms]")
plt.tight_layout()
set_axis_size(*figsize)
def get_state_durations_flat(model, states):
durations = get_state_lengths(states)
ups, downs = get_updown_lengths(durations)
dt_to_s = model.params.dt / 1000 # return seconds
flat_ups = [u * dt_to_s for up in ups for u in up]
flat_downs = [d * dt_to_s for down in downs for d in down]
return flat_ups, flat_downs
def plot_state_durations(model, states, legend=True, alpha=1.0):
durations = get_state_lengths(states)
ups, downs = get_updown_lengths(durations)
dt_to_s = model.params.dt / 1000
flat_ups = [u * dt_to_s for up in ups for u in up]
flat_downs = [d * dt_to_s for down in downs for d in down]
# figsize = (3, 2.0)
figsize = (2, 1.7)
# figsize = np.multiply(figsize, 0.75)
plt.figure(figsize=figsize)
plt.hist(flat_ups, color="C0", label="up", edgecolor="k", linewidth=0.75)
plt.hist(flat_downs, color="C1", label="down", edgecolor="k", linewidth=0.75, alpha=alpha)
if legend:
plt.legend(fontsize=12, frameon=False)
plt.gca().set_yscale("log")
plt.gca().xaxis.set_major_locator(plt.MaxNLocator(4))
plt.ylabel("Log-probability")
plt.yticks([])
plt.xlabel("Duration [s]")
plt.gca().tick_params(axis="x", direction="out", which="major", bottom=True, left=True)
ax = plt.gca()
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["left"].set_visible(False)
# ax.spines["bottom"].set_visible(False)
plt.tight_layout()
set_axis_size(*figsize)
def plot_involvement_distribution(model, involvement, remove_xticks=True, color_localglobal=False):
# figsize = (3, 2)
figsize = (2, 1.7)
# figsize = np.multiply(figsize, 0.75)
plt.figure(figsize=figsize)
N, bins, patches = plt.hist(involvement * 100, bins=12, density=True, rwidth=0.8, edgecolor="k", color="C1")
if color_localglobal:
for i in range(0, len(patches) // 2):
patches[i].set_facecolor("C0")
for i in range(len(patches) // 2, len(patches)):
patches[i].set_facecolor("C1")
plt.xticks([0, 50, 100])
if remove_xticks:
plt.yticks([])
else:
y_vals = plt.gca().get_yticks()
plt.gca().set_yticklabels(["{:3.0f}".format(x * 100) for x in y_vals])
plt.ylabel("Probability")
plt.xlabel("Involvement [%]")
plt.gca().tick_params(axis="x", direction="out", which="major", bottom=True, left=True)
plt.xlim([0, 100])
ax = plt.gca()
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["left"].set_visible(False)
# ax.spines["bottom"].set_visible(False)
max_y = plt.gca().get_ylim()[1] * 1.1
plt.ylim(0, max_y * 1.1)
# plt.vlines([50], 0, max_y, linestyles="--", colors="#363638", alpha=0.9)
plt.text(25, max_y * 0.95, "local", fontsize=14, color="C0", ha="center")
plt.text(75, max_y * 0.95, "global", fontsize=14, color="C1", ha="center")
plt.tight_layout()
set_axis_size(*figsize)
def plot_involvement_mean_amplitude(model, involvement, skip=300, lingress=True):
figsize = (3, 2)
figsize = np.multiply(figsize, 0.75)
plt.figure(figsize=figsize)
rates = np.mean(model.output, axis=0)[::skip]
plt.scatter(
involvement[::skip] * 100,
rates,
# scipy.stats.zscore(np.mean(model.output, axis=0)[::skip]),
s=10,
edgecolor="w",
linewidth=0.2,
c="C1",
alpha=0.7,
)
# plt.gca().axhline(0, linestyle="--", lw=1, color="#ABB2BF", zorder=-1)
plt.xlim(0, 100)
plt.xticks([0, 50, 100])
plt.xlabel("Involvement [%]")
plt.ylabel("Rate [Hz]")
if lingress:
slope, intercept, r_value, p_value, std_err = plot_linregress(
involvement[::skip] * 100, rates, kwargs={"c": "k", "alpha": 0.75}
)
plt.text(50, np.max(rates) * 0.75, f"$R^2={r_value**2:0.2f}$")
axs = plt.gca()
axs.spines["right"].set_visible(False)
axs.spines["top"].set_visible(False)
axs.spines["left"].set_visible(False)
axs.spines["bottom"].set_visible(False)
plt.gca().tick_params(axis="both", direction="out", which="major", bottom=True, left=True)
plt.tight_layout()
set_axis_size(*figsize)
def get_state_lengths(xs):
"""
Get's the length of successive elements in a list.
Useful for computing inter-spike-intervals (ISI) or the length
of states.
Example: get_state_lengths([0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0])
Returns: [(0, 4), (1, 4), (0, 2), (1, 1), (0, 1)]
:param xs: Input list with successive states
:type xs: list
:return: List of (state, length) tuples
:rtype: list
"""
import itertools
if np.array(xs).ndim == 1:
durations = [(x, len(list(y))) for x, y in itertools.groupby(xs)]
elif np.array(xs).ndim > 1:
durations = [get_state_lengths(xss) for xss in xs]
return durations
def get_updown_lengths(durations):
"""Returns the length of all up- and down-states for each node"""
ups = []
downs = []
for i, l in enumerate(durations):
ups.append([u[1] for u in l if u[0] == 1.0])
downs.append([u[1] for u in l if u[0] == 0.0])
return ups, downs
def plot_linregress(x, y, plot=True, kwargs={}):
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
print(f"slope = {slope}, intercept = {intercept}, r_value = {r_value}, p_value = {p_value}, std_err = {std_err}")
if plot:
plt.plot(x, intercept + x * slope, **kwargs)
return slope, intercept, r_value, p_value, std_err
def plot_down_up_durations(model, durations, ds, plot_regression=False):
import matplotlib.cm as cm
all_ups = []
all_downs = []
all_colors = []
for n, dur in enumerate(durations):
# make sure to start with a down-state
skip = 0 if dur[0][0] == 0.0 else 1
down_durations = [d[1] * model.params.dt / 1000 for d in dur[skip::2]]
up_durations = [d[1] * model.params.dt / 1000 for d in dur[skip + 1 :: 2]]
# normalized degree [0, 1]
degree = (np.sum(ds.Cmat, axis=1))[n] / np.max(np.sum(ds.Cmat, axis=1))
for d, u in zip(down_durations, up_durations):
all_ups.append(u)
all_downs.append(d)
all_colors.append(degree)
fig_scale = 1.2
plt.figure(figsize=(3 * fig_scale, 2.5 * fig_scale))
plt.scatter(all_downs, all_ups, s=2, c=all_colors, cmap="plasma", alpha=0.8)
plt.ylabel("Next up-state duration [s]")
plt.xlabel("Down-state duration [s]")
plot_linregress(all_downs, all_ups, plot=plot_regression)
cbar = plt.colorbar()
cbar.set_label(label="Node degree", size=10)
cbar.ax.tick_params(labelsize=10)
plt.show()
def get_transition_phases(states, phase):
transitions = np.diff(states)
node_mean_phases_down = []
node_mean_phases_up = []
for ni, trs in enumerate(transitions):
down_phases = []
up_phases = []
for i, t in enumerate(trs):
if t == -1:
down_phases.append(float(phase[i]))
if t == 1:
up_phases.append(float(phase[i]))
mean_down_phase = scipy.stats.circmean(down_phases, high=np.pi, low=-np.pi, nan_policy="raise")
mean_up_phase = scipy.stats.circmean(up_phases, high=np.pi, low=-np.pi, nan_policy="raise")
print(f"Node {ni}: mean_down_phase = {mean_down_phase}, mean_up_phase = {mean_up_phase}")
node_mean_phases_down.append(mean_down_phase)
node_mean_phases_up.append(mean_up_phase)
return node_mean_phases_down, node_mean_phases_up
def phase_transition_coordinate(node_mean_phases_down, node_mean_phases_up, atlas):
coords = atlas.coords()
minmax_coords = [int(np.min([c[1] for c in coords])), int(np.max([c[1] for c in coords]))]
figsize = (2, 2)
plt.figure(figsize=figsize)
# ATTENTION!!!! MULTIPLYING COORDINATES WITH -1 SO THE PLOT GOES ANTERIOR TO POSTERIOR
y_coords = np.array([c[1] for c in coords]) * -1
plt.scatter(y_coords, node_mean_phases_down, c="C1", s=14, edgecolor="k", linewidth=0.5, label="down")
plt.scatter(y_coords, node_mean_phases_up, c="C0", s=14, edgecolor="k", linewidth=0.5, label="up")
# plt.legend(fontsize=8)
down_slope, down_intercept, down_r_value, down_p_value, down_std_err = stats.linregress(
y_coords, node_mean_phases_down
)
print(f"Down transitions: slope: {down_slope} r: {down_r_value}, p: {down_p_value}")
plt.plot(y_coords, (lambda c: down_intercept + down_slope * c)(y_coords), c="C1", label="x", zorder=-5)
up_slope, up_intercept, up_r_value, up_p_value, up_std_err = stats.linregress(y_coords, node_mean_phases_up)
print(f"Up transitions: slope: {up_slope} r: {up_r_value}, p: {up_p_value}")
plt.plot(y_coords, (lambda c: up_intercept + up_slope * c)(y_coords), c="C0", label="x", zorder=-5)
plt.xlabel("Coordinate")
    plt.xticks([np.min(y_coords) + 30, np.max(y_coords) - 30], ["Anterior", "Posterior"])
plt.ylabel("Transition phase $\phi$")
plt.tight_layout()
set_axis_size(*figsize)
return (
down_slope,
down_intercept,
down_r_value,
down_p_value,
down_std_err,
up_slope,
up_intercept,
up_r_value,
up_p_value,
up_std_err,
)
def kuramoto(events):
import tqdm
# fill in event at the end of the timeseries
events[:, -1] = True
phases = []
logging.info("Determining phases ...")
for n, ev in tqdm.tqdm(enumerate(events), total=len(events)):
maximalist = np.where(ev)[0]
phases.append([])
last_event = 0
for m in maximalist:
for t in range(last_event, m):
phi = 2 * np.pi * float(t - last_event) / float(m - last_event)
phases[n].append(phi)
last_event = m
phases[n].append(2 * np.pi)
logging.info("Computing Kuramoto order parameter ...")
# determine kuramoto order paramter
kuramoto = []
nTraces = events.shape[0]
for t in tqdm.tqdm(range(events.shape[1]), total=events.shape[1]):
R = 1j * 0
for n in range(nTraces):
R += np.exp(1j * phases[n][t])
R /= nTraces
kuramoto.append(np.absolute(R))
return kuramoto
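# A minimal usage sketch (an illustration; any boolean event raster of shape
# (N, T) works, and here the events are assumed to be the up-transitions of
# the detected slow-wave states):
def example_kuramoto(model):
    states = detectSWs(model)
    events = np.zeros_like(states, dtype=bool)
    events[:, 1:] = np.diff(states) > 0  # mark up-transitions as events
    # kuramoto() returns |R(t)|: ~1 for synchronized phases, ~0 for spread-out phases
    return kuramoto(events)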
def plot_phases_and_involvement(model, involvement, phases, states):
fig, axs = plt.subplots(2, 1, figsize=(4, 2), sharex=True)
up_transitions = np.diff(states) > 0
down_transitions = np.diff(states) < 0
for i, ups in enumerate(up_transitions):
axs[0].vlines(np.where(ups)[0] * model.params.dt / 1000, 0, 3, lw=0.2, alpha=1, color="C0")
for i, downs in enumerate(down_transitions):
axs[0].vlines(np.where(downs)[0] * model.params.dt / 1000, -3, 0, lw=0.2, alpha=1, color="C1")
# annotate vlines
axs[0].text(7.7, 7.0, "up-transitions", fontsize=8)
axs[0].vlines(7.6, 6.8, 9.0, lw=1, color="C0")
axs[0].text(7.7, 4.4, "down-transitions", fontsize=8)
axs[0].vlines(7.6, 4.0, 6.2, lw=1, color="C1")
# axs[0].fill_between(model.t/1000, phases, 0, where=phases<0, zorder=-2, color='C0', alpha=0.8, label='$\phi < 0$')
# axs[0].fill_between(model.t/1000, phases, 0, where=phases>0, zorder=-2, color='C1', alpha=0.8, label='$\phi > 0$')
    axs[0].plot(model.t / 1000, phases, zorder=2, lw=1, label=r"Global phase $\phi$", c="fuchsia")
axs[0].set_yticks([])
axs[0].legend(fontsize=8, frameon=False, loc="upper center", bbox_to_anchor=(0.5, 1.1), ncol=1)
axs[1].plot(model.t / 1000, np.mean(model.output, axis=0), c="k", alpha=1, lw=1, label="Mean rate")
axs[1].plot(model.t / 1000, involvement * 30, lw=1, c="C3", label="Involvement")
axs[1].set_xlim(0, 10)
axs[1].set_yticks([])
axs[1].set_xticks([0, 5, 10])
axs[1].tick_params(labelsize=8)
axs[1].set_xlabel("Time [s]", fontsize=8)
axs[1].legend(fontsize=8, frameon=False, loc="upper center", bbox_to_anchor=(0.5, 1.25), ncol=2)
for ax in axs:
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["bottom"].set_visible(False)
plt.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0.2)
# used for RESULT-dynamical-regimes-EXP-5.ipynb
def plot_ts(
model,
plot_nodes=[0],
plot_alpha=1,
plot_mean_background=False,
mean_alpha=0.3,
shift_mean=False,
adaptation=False,
stimulus=False,
stimulus_scale=1.0,
log_power=False,
log_freq=False,
norm_power=True,
title_add="",
lw=1,
figsize=(4, 1.5),
labels=False,
crop_zero=None,
xlim=None,
savename=None,
fontsize=None,
n_ticks=2,
tick_width=1,
tick_length=3,
legend=False,
yticks=None,
):
_, axs = plt.subplots(1, 1, dpi=600, figsize=figsize)
title = " ".join([f"{p} = {model.params[p]}" for p in ["mue_ext_mean", "mui_ext_mean", "b", "sigma_ou"]])
title += " " + title_add
plot_col = "k"
if str(plot_nodes) == "all":
plot_output = model.output
plot_adaptation = model.outputs.IA
plot_col = None
elif str(plot_nodes) == "mean":
plot_output = np.mean(model.output, axis=0)
plot_adaptation = np.mean(model.outputs.IA, axis=0)
else:
plot_output = model.output[plot_nodes]
plot_adaptation = model.outputs.IA[plot_nodes]
plot_col = None if len(plot_nodes) > 1 else "k"
if plot_mean_background:
mean_signal = np.mean(model.output, axis=0)
# plot_adaptation = model.outputs.IA if str(plot_nodes) == "all" else model.outputs.IA[plot_nodes]
plot_t = model.t / 1000
plot_stimulus = model.params["ext_exc_current"] * stimulus_scale
if xlim:
plot_t = plot_t[xlim[0] : xlim[1]]
if plot_output.ndim == 2:
plot_output = plot_output[:, xlim[0] : xlim[1]]
if adaptation:
plot_adaptation = plot_adaptation[:, xlim[0] : xlim[1]]
else:
plot_output = plot_output[xlim[0] : xlim[1]]
if adaptation:
plot_adaptation = plot_adaptation[xlim[0] : xlim[1]]
if stimulus and model.params["ext_exc_current"].ndim > 0:
plot_stimulus = plot_stimulus[xlim[0] : xlim[1]]
if plot_mean_background:
mean_signal = mean_signal[xlim[0] : xlim[1]]
axs.plot(
plot_t,
plot_output.T,
alpha=plot_alpha,
lw=lw,
c=plot_col,
# label="Firing rate" if len(plot_nodes) == 1 else None,
)
# plot the mean signal in the background?
if plot_mean_background:
if shift_mean:
mean_signal /= np.max(mean_signal)
mean_signal *= np.abs(np.max(plot_output) - np.min(plot_output)) / 4
mean_signal = mean_signal + np.max(plot_output) * 1.1
axs.plot(plot_t, mean_signal, alpha=mean_alpha, lw=lw, c="k", zorder=10, label="Mean brain")
# if a stimulus was present
if legend and stimulus and model.params["ext_exc_current"].ndim > 0:
axs.plot(plot_t, plot_stimulus, lw=lw, c="C1", label="Stimulus", alpha=0.8)
# add a line for adaptation current in legend
if adaptation:
axs.plot([0], [0], lw=lw, c="C0", label="Adaptation")
leg = axs.legend(frameon=True, fontsize=14, framealpha=0.9)
# Get the bounding box of the original legend
        bb = leg.get_bbox_to_anchor().transformed(axs.transAxes.inverted())
# Change to location of the legend.
yOffset = +0.0
bb.y0 += yOffset
bb.y1 += yOffset
leg.set_bbox_to_anchor(bb, transform=axs.transAxes)
if adaptation and not stimulus:
color = "C0"
ax_adapt = axs.twinx()
# ax_adapt.set_ylabel("$I_A$ [pA]", color=color)
ax_adapt.tick_params(
axis="y",
color=color,
labelcolor=color,
direction="out",
length=tick_length,
width=tick_width,
labelsize=fontsize,
right=False,
)
# plot_adaptation = model.outputs.IA if str(plot_nodes) == "all" else model.outputs.IA[plot_nodes]
ax_adapt.plot(plot_t, plot_adaptation.T, lw=lw, label="Adaptation", color=color)
# ax_adapt.legend(loc=4)
if legend and adaptation:
ax_adapt.legend(loc="lower right", frameon=True, fontsize=14, framealpha=0.9)
ax_adapt.spines["right"].set_visible(False)
ax_adapt.spines["top"].set_visible(False)
ax_adapt.spines["left"].set_visible(False)
ax_adapt.spines["bottom"].set_visible(False)
ax_adapt.yaxis.set_major_locator(plt.MaxNLocator(n_ticks))
if labels:
axs.set_xlabel("Time [s]")
axs.set_ylabel("Rate $r_E$ [Hz]")
if adaptation:
ax_adapt.set_ylabel("Adaptation $I_A$ [pA]", color=color)
axs.spines["right"].set_visible(False)
axs.spines["top"].set_visible(False)
axs.spines["left"].set_visible(False)
axs.spines["bottom"].set_visible(False)
# reduce number of ticks
axs.yaxis.set_major_locator(plt.MaxNLocator(n_ticks))
axs.xaxis.set_major_locator(MaxNLocator(integer=True))
if yticks is not None:
axs.set_yticks(yticks)
axs.tick_params(
axis="both",
direction="out",
length=tick_length,
width=tick_width,
colors="k",
labelsize=fontsize,
bottom=False,
left=False,
)
if crop_zero:
axs.set_xlim(crop_zero, model.t[-1] / 1000)
plt.tight_layout()
# set the axis size precisely
set_axis_size(figsize[0], figsize[1])
if savename:
save_fname = os.path.join(paths.FIGURES_DIR, f"{savename}")
plt.savefig(save_fname)
# helper function
def set_axis_size(w, h, ax=None):
""" w, h: width, height in inches """
if not ax:
ax = plt.gca()
l = ax.figure.subplotpars.left
r = ax.figure.subplotpars.right
t = ax.figure.subplotpars.top
b = ax.figure.subplotpars.bottom
figw = float(w) / (r - l)
figh = float(h) / (t - b)
ax.figure.set_size_inches(figw, figh)
def plot_matrix(mat, cbarlabel, cbar=True, ylabels=True, plotlog=False):
figsize = (2, 2)
fig = plt.figure(figsize=figsize)
# fc_fit = du.model_fit(model, ds, bold_transient=bold_transient, fc=True)["mean_fc_score"]
# plt.title(f"FC (corr: {fc_fit:0.2f})", fontsize=12)
if plotlog:
from matplotlib.colors import LogNorm
im = plt.imshow(mat, norm=LogNorm(vmin=10e-5, vmax=np.max(mat)), origin="upper")
else:
im = plt.imshow(mat, origin="upper")
plt.ylabel("Node")
plt.xlabel("Node")
plt.xticks([0, 20, 40, 60])
plt.yticks([0, 20, 40, 60])
    if not ylabels:
plt.ylabel("")
plt.yticks([])
if cbar:
# cbaxes = fig.add_axes([0.95, 0.36, 0.02, 0.52])
cbar = plt.colorbar(im, ax=plt.gca(), fraction=0.046, pad=0.04)
# cbar = plt.colorbar(im, cbaxes)
# cbar.set_ticks([0, 1])
# cbar.ax.tick_params(width=0, labelsize=10)
cbar.set_label(label=cbarlabel, size=10, labelpad=-1)
plt.tight_layout()
set_axis_size(*figsize)
################### plot fits
def plot_fc(bold, model=None, bold_transient=0, cbar=False, ylabels=True):
if bold_transient and model:
t_bold = model.outputs.BOLD.t_BOLD[model.outputs.BOLD.t_BOLD > bold_transient] / 1000
bold = model.outputs.BOLD.BOLD[:, model.outputs.BOLD.t_BOLD > bold_transient]
figsize = (2, 2)
fig = plt.figure(figsize=figsize)
# fc_fit = du.model_fit(model, ds, bold_transient=bold_transient, fc=True)["mean_fc_score"]
# plt.title(f"FC (corr: {fc_fit:0.2f})", fontsize=12)
im = plt.imshow(func.fc(bold), origin="upper", clim=(0, 1))
plt.ylabel("Node")
plt.xlabel("Node")
plt.xticks([0, 20, 40, 60])
plt.yticks([0, 20, 40, 60])
    if not ylabels:
plt.ylabel("")
plt.yticks([])
if cbar:
cbaxes = fig.add_axes([0.95, 0.36, 0.02, 0.52])
cbar = plt.colorbar(im, cbaxes)
cbar.set_ticks([0, 1])
cbar.ax.tick_params(width=0, labelsize=10)
cbar.set_label(label="Correlation", size=10, labelpad=-1)
plt.tight_layout()
set_axis_size(*figsize)
def plot_fcd(bold, model=None, bold_transient=0, cbar=False, ylabels=True):
if bold_transient and model:
t_bold = model.outputs.BOLD.t_BOLD[model.outputs.BOLD.t_BOLD > bold_transient] / 1000
bold = model.outputs.BOLD.BOLD[:, model.outputs.BOLD.t_BOLD > bold_transient]
figsize = (2, 2)
fig = plt.figure(figsize=figsize)
fcd_matrix = func.fcd(bold)
print(fcd_matrix.shape)
im = plt.imshow(fcd_matrix, origin="upper")
plt.xlabel("Time [min]")
plt.ylabel("Time [min]")
# nticks = 3
# plt.xticks(np.linspace(0, fcd_matrix.shape[0] - 1, nticks), np.linspace(0, bold.shape[1] * 2.0, nticks, dtype=int))
# plt.yticks(np.linspace(0, fcd_matrix.shape[0] - 1, nticks), np.linspace(0, bold.shape[1] * 2.0, nticks, dtype=int))
plt.xticks([0, 32, 64], [0, 6, 12])
plt.yticks([0, 32, 64], [0, 6, 12])
    if not ylabels:
plt.ylabel("")
plt.yticks([])
if cbar:
cbaxes = fig.add_axes([0.9, 0.36, 0.02, 0.52])
cbar = plt.colorbar(im, cbaxes)
# cbar.set_ticks([0.4, 1])
cbar.ax.locator_params(nbins=5)
cbar.ax.tick_params(width=0, labelsize=10)
cbar.set_label(label="Correlation", size=10)
plt.tight_layout(w_pad=4.5)
set_axis_size(*figsize)
def plot_fcd_distribution(model, ds, bold_transient=0):
if bold_transient and model:
t_bold = model.outputs.BOLD.t_BOLD[model.outputs.BOLD.t_BOLD > bold_transient] / 1000
bold = model.outputs.BOLD.BOLD[:, model.outputs.BOLD.t_BOLD > bold_transient]
figsize = (4, 3)
fig = plt.figure(figsize=figsize)
# plot distribution in fcd
# axs[2, 1].set_title(f"FCD distance {fcd_fit:0.2f}", fontsize=12)
plt.ylabel("Density")
plt.yticks([])
plt.xticks([0, 0.5, 1])
plt.xlabel("FCD$_{ij}$")
m1 = func.fcd(bold)
triu_m1_vals = m1[np.triu_indices(m1.shape[0], k=1)]
plt.hist(triu_m1_vals, density=True, color="springgreen", zorder=10, alpha=0.6, edgecolor="k")
# plot fcd distributions of data
if hasattr(ds, "FCDs"):
for emp_fcd in ds.FCDs:
m1 = emp_fcd
            triu_m1_vals = m1[np.triu_indices(m1.shape[0], k=1)]
            # plausible completion of the truncated original: overlay the
            # empirical FCD distributions on the simulated one
            plt.hist(triu_m1_vals, density=True, alpha=0.5, edgecolor="k")
import os
import logging
from PIL import Image
import numpy as np
import sklearn.datasets
import joblib
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, datasets
import skorch.dataset
from skorch.utils import to_numpy
from utils.helpers import set_attributes
from .helpers import train_dev_split, make_ssl_dataset_
from .transforms import (precompute_batch_tranforms, global_contrast_normalization,
zca_whitening, random_translation, robust_minmax_scale,
gaussian_noise)
DIR = os.path.abspath(os.path.dirname(__file__))
DATASETS_DICT = {"cifar10": "CIFAR10",
"svhn": "SVHN",
"mnist": "MNIST",
"pts_circles": "None",
"pts_moons": "None",
"pts_var_gaus": "None",
"pts_cov_gaus": "None",
"pts_iso_gaus": "None"}
DATASETS = list(DATASETS_DICT.keys())
N_LABELS = {"cifar10": 4000,
"svhn": 1000,
"mnist": 100,
"pts_circles": 10,
"pts_moons": 6,
"pts_var_gaus": 12,
"pts_cov_gaus": 12,
"pts_iso_gaus": 12}
def get_dataset(dataset):
"""Return the correct dataset."""
dataset = dataset.lower()
try:
# eval because stores name as string in order to put it at top of file
return eval(DATASETS_DICT[dataset])
except KeyError:
raise ValueError("Unkown dataset: {}".format(dataset))
def get_train_dev_test_ssl(dataset,
n_labels=None,
root=None,
dev_size=0.1,
seed=123,
**kwargs):
"""Return the training, validation and test dataloaders
Parameters
----------
dataset : {"cifar", "svhn"}
Name of the dataset to load
n_labels : int
Number of labels to keep. If `None` uses dataset specific default.
root : str, optional
Path to the dataset root. If `None` uses the default one.
dev_size : float or int, optional
If float, should be between 0.0 and 1.0 and represent the proportion of
the dataset to include in the dev split. If int, represents the absolute
number of dev samples.
seed : int, optional
Random seed.
kwargs :
Additional arguments to `generate_train_dev_test_ssl`.
"""
_Dataset = get_dataset(dataset)
if n_labels is None:
n_labels = N_LABELS[dataset]
if _Dataset is None:
# has to generate
return generate_train_dev_test_ssl(dataset, n_labels,
dev_size=dev_size,
seed=seed,
**kwargs)
data_kwargs = dict()
if root is not None:
data_kwargs["root"] = root
# important to do train before test => compute ZCA on train
train = _Dataset(split="train", **data_kwargs)
test = _Dataset(split="test", **data_kwargs)
    # Nota Bene: we actually apply the transformations such as GCN before
    # the dev splitting => normalization is also done on dev, which is not
    # ideal but fine because it is not done on the test set => final results
    # will be correct, but hyperparameter tuning is slightly biased
train, dev = train_dev_split(train, dev_size=dev_size, seed=seed, is_stratify=True)
make_ssl_dataset_(train, n_labels, seed=seed, is_stratify=True)
return train, dev, test
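# Hypothetical usage sketch (names as defined in this module): load the
# two-moons toy problem with 6 labelled points and a 10% dev split.
#
#   train, dev, test = get_train_dev_test_ssl("pts_moons", n_labels=6)
#
# For image datasets ("cifar10", "svhn", "mnist") the same call loads the
# data and applies the precomputed transforms before splitting.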
# POINT DATASETS
def generate_train_dev_test_ssl(dataset, n_label,
n_unlabel=int(1e4),
n_test=int(1e4),
dev_size=0.1, seed=123, is_hard=False):
"""Genererate simple ssl datasets.
Parameters
----------
dataset : {"pts_circles", "pts_moons", "pts_var_gaus", "pts_cov_gaus", "pts_iso_gaus"}
Name of the dataset to generate
    n_label : int
        Number of labelled examples.
    n_unlabel : int
        Number of unlabelled examples.
    n_test : int
        Number of test examples.
    dev_size : float or int
        If float, should be between 0.0 and 1.0 and represent the ratio
        between n_unlabel and the size of the dev set. If int, represents
        the absolute number of dev samples.
    seed : int, optional
        Random seed.
    is_hard : bool, optional
        Whether to increase noise / variance by 1.5x to make the task more
        difficult.
"""
n_dev = int(n_unlabel * dev_size) if dev_size < 1 else dev_size
hard_factor = 1.5 if is_hard else 1 # multiply by 1.5 if hard
gaus_means = np.array([[7, 9], [8., -1], [-5., -9.]])
args = dict(pts_circles={"f": sklearn.datasets.make_circles,
"std": 0.09, "kwargs": dict(factor=.5)},
pts_moons={"f": sklearn.datasets.make_moons,
"std": 0.13, "kwargs": {}},
pts_iso_gaus={"f": sklearn.datasets.make_blobs,
"std": 1.5, "kwargs": dict(centers=gaus_means)},
pts_cov_gaus={"f": sklearn.datasets.make_blobs,
"std": 1.5,
"kwargs": dict(centers=gaus_means)},
pts_var_gaus={"f": sklearn.datasets.make_blobs,
"std": np.array([1.0, 2.5, 0.5]),
"kwargs": dict(centers=gaus_means)})
spec_args = args[dataset]
def get_noisy_kwargs(is_label=False):
if "_gaus" in dataset:
            std_label_factor = 1 if not is_label else 0.5  # halve the std for labelled data
spec_args["kwargs"]["cluster_std"] = spec_args["std"] * std_label_factor * hard_factor
else:
std_label_factor = 1 if not is_label else 0 # no noise
spec_args["kwargs"]["noise"] = spec_args["std"] * std_label_factor * hard_factor
return spec_args["kwargs"]
X_lab, y_lab = spec_args["f"](n_samples=n_label, random_state=seed,
**get_noisy_kwargs(True))
X_unlab, y_unlab = spec_args["f"](n_samples=n_unlabel, random_state=seed,
**get_noisy_kwargs())
y_unlab[:] = -1
X_train = np.concatenate([X_lab, X_unlab])
y_train = np.concatenate([y_lab, y_unlab])
X_dev, y_dev = spec_args["f"](n_samples=n_dev, random_state=seed,
**get_noisy_kwargs())
X_test, y_test = spec_args["f"](n_samples=n_test, random_state=seed,
**get_noisy_kwargs())
if dataset == "pts_cov_gaus":
transformation = [[0.6, -0.6], [-0.4, 0.8]]
X_train = np.dot(X_train, transformation)
X_dev = np.dot(X_dev, transformation)
        X_test = np.dot(X_test, transformation)
    # plausible completion of the truncated original: wrap the splits as
    # skorch datasets so the return matches `get_train_dev_test_ssl`
    train = skorch.dataset.Dataset(X_train.astype(np.float32), y_train)
    dev = skorch.dataset.Dataset(X_dev.astype(np.float32), y_dev)
    test = skorch.dataset.Dataset(X_test.astype(np.float32), y_test)
    return train, dev, test
from __future__ import division, print_function, absolute_import
__usage__ = """
To run tests locally:
python tests/test_arpack.py [-l<int>] [-v<int>]
"""
import warnings
import numpy as np
from numpy.testing import assert_allclose, \
assert_array_almost_equal_nulp, TestCase, run_module_suite, dec, \
assert_raises, verbose, assert_equal, assert_array_equal
from numpy import array, finfo, argsort, dot, round, conj, random
from scipy.linalg import eig, eigh
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix, isspmatrix
from scipy.sparse.linalg import LinearOperator, aslinearoperator
from scipy.sparse.linalg.eigen.arpack import eigs, eigsh, svds, \
ArpackNoConvergence, arpack
from scipy.linalg import svd, hilbert
from scipy.lib._gcutils import assert_deallocated
# eigs() and eigsh() are called many times, so apply a filter for the warnings
# they generate here.
_eigs_warn_msg = "Single-precision types in `eigs` and `eighs`"
def setup_module():
warnings.filterwarnings("ignore", message=_eigs_warn_msg)
def teardown_module():
warnings.filterwarnings("default", message=_eigs_warn_msg)
# precision for tests
_ndigits = {'f': 3, 'd': 11, 'F': 3, 'D': 11}
def _get_test_tolerance(type_char, mattype=None):
"""
Return tolerance values suitable for a given test:
Parameters
----------
type_char : {'f', 'd', 'F', 'D'}
Data type in ARPACK eigenvalue problem
mattype : {csr_matrix, aslinearoperator, asarray}, optional
Linear operator type
Returns
-------
tol
Tolerance to pass to the ARPACK routine
rtol
Relative tolerance for outputs
atol
Absolute tolerance for outputs
"""
rtol = {'f': 3000 * np.finfo(np.float32).eps,
'F': 3000 * np.finfo(np.float32).eps,
'd': 2000 * np.finfo(np.float64).eps,
'D': 2000 * np.finfo(np.float64).eps}[type_char]
atol = rtol
tol = 0
if mattype is aslinearoperator and type_char in ('f', 'F'):
# iterative methods in single precision: worse errors
# also: bump ARPACK tolerance so that the iterative method converges
tol = 30 * np.finfo(np.float32).eps
rtol *= 5
if mattype is csr_matrix and type_char in ('f', 'F'):
# sparse in single precision: worse errors
rtol *= 5
return tol, rtol, atol
def generate_matrix(N, complex=False, hermitian=False,
pos_definite=False, sparse=False):
M = np.random.random((N,N))
if complex:
M = M + 1j * np.random.random((N,N))
if hermitian:
if pos_definite:
if sparse:
i = np.arange(N)
j = np.random.randint(N, size=N-2)
i, j = np.meshgrid(i, j)
M[i,j] = 0
M = np.dot(M.conj(), M.T)
else:
M = np.dot(M.conj(), M.T)
if sparse:
i = np.random.randint(N, size=N * N // 4)
j = np.random.randint(N, size=N * N // 4)
ind = np.where(i == j)
j[ind] = (j[ind] + 1) % N
M[i,j] = 0
M[j,i] = 0
else:
if sparse:
i = np.random.randint(N, size=N * N // 2)
j = np.random.randint(N, size=N * N // 2)
M[i,j] = 0
return M
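# Quick sketch (synthetic, not part of the original test suite): a Hermitian
# positive-definite matrix from `generate_matrix` must have strictly
# positive eigenvalues.
def _example_generate_matrix():
    np.random.seed(0)
    m = generate_matrix(5, hermitian=True, pos_definite=True)
    print(np.linalg.eigvalsh(m).min() > 0)  # True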
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
def assert_allclose_cc(actual, desired, **kw):
"""Almost equal or complex conjugates almost equal"""
try:
assert_allclose(actual, desired, **kw)
except:
assert_allclose(actual, conj(desired), **kw)
def argsort_which(eval, typ, k, which,
sigma=None, OPpart=None, mode=None):
"""Return sorted indices of eigenvalues using the "which" keyword
from eigs and eigsh"""
if sigma is None:
reval = np.round(eval, decimals=_ndigits[typ])
else:
if mode is None or mode == 'normal':
if OPpart is None:
reval = 1. / (eval - sigma)
elif OPpart == 'r':
reval = 0.5 * (1. / (eval - sigma)
+ 1. / (eval - np.conj(sigma)))
elif OPpart == 'i':
reval = -0.5j * (1. / (eval - sigma)
- 1. / (eval - np.conj(sigma)))
elif mode == 'cayley':
reval = (eval + sigma) / (eval - sigma)
elif mode == 'buckling':
reval = eval / (eval - sigma)
else:
raise ValueError("mode='%s' not recognized" % mode)
reval = np.round(reval, decimals=_ndigits[typ])
if which in ['LM', 'SM']:
ind = np.argsort(abs(reval))
elif which in ['LR', 'SR', 'LA', 'SA', 'BE']:
ind = np.argsort(np.real(reval))
elif which in ['LI', 'SI']:
# for LI,SI ARPACK returns largest,smallest abs(imaginary) why?
if typ.islower():
ind = np.argsort(abs(np.imag(reval)))
else:
ind = np.argsort(np.imag(reval))
else:
raise ValueError("which='%s' is unrecognized" % which)
if which in ['LM', 'LA', 'LR', 'LI']:
return ind[-k:]
elif which in ['SM', 'SA', 'SR', 'SI']:
return ind[:k]
elif which == 'BE':
return np.concatenate((ind[:k//2], ind[k//2-k:]))
def eval_evec(symmetric, d, typ, k, which, v0=None, sigma=None,
mattype=np.asarray, OPpart=None, mode='normal'):
general = ('bmat' in d)
if symmetric:
eigs_func = eigsh
else:
eigs_func = eigs
if general:
err = ("error for %s:general, typ=%s, which=%s, sigma=%s, "
"mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__,
typ, which, sigma,
mattype.__name__,
OPpart, mode))
else:
err = ("error for %s:standard, typ=%s, which=%s, sigma=%s, "
"mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__,
typ, which, sigma,
mattype.__name__,
OPpart, mode))
a = d['mat'].astype(typ)
ac = mattype(a)
if general:
b = d['bmat'].astype(typ.lower())
bc = mattype(b)
# get exact eigenvalues
exact_eval = d['eval'].astype(typ.upper())
ind = argsort_which(exact_eval, typ, k, which,
sigma, OPpart, mode)
exact_eval_a = exact_eval
exact_eval = exact_eval[ind]
# compute arpack eigenvalues
kwargs = dict(which=which, v0=v0, sigma=sigma)
if eigs_func is eigsh:
kwargs['mode'] = mode
else:
kwargs['OPpart'] = OPpart
# compute suitable tolerances
kwargs['tol'], rtol, atol = _get_test_tolerance(typ, mattype)
# on rare occasions, ARPACK routines return results that are proper
# eigenvalues and -vectors, but not necessarily the ones requested in
# the parameter which. This is inherent to the Krylov methods, and
# should not be treated as a failure. If such a rare situation
# occurs, the calculation is tried again (but at most a few times).
ntries = 0
while ntries < 5:
# solve
if general:
try:
eval, evec = eigs_func(ac, k, bc, **kwargs)
except ArpackNoConvergence:
kwargs['maxiter'] = 20*a.shape[0]
eval, evec = eigs_func(ac, k, bc, **kwargs)
else:
try:
eval, evec = eigs_func(ac, k, **kwargs)
except ArpackNoConvergence:
kwargs['maxiter'] = 20*a.shape[0]
eval, evec = eigs_func(ac, k, **kwargs)
ind = argsort_which(eval, typ, k, which,
sigma, OPpart, mode)
eval_a = eval
eval = eval[ind]
evec = evec[:,ind]
# check eigenvectors
LHS = np.dot(a, evec)
if general:
RHS = eval * np.dot(b, evec)
else:
RHS = eval * evec
assert_allclose(LHS, RHS, rtol=rtol, atol=atol, err_msg=err)
try:
# check eigenvalues
assert_allclose_cc(eval, exact_eval, rtol=rtol, atol=atol,
err_msg=err)
break
except AssertionError:
ntries += 1
# check eigenvalues
assert_allclose_cc(eval, exact_eval, rtol=rtol, atol=atol, err_msg=err)
class DictWithRepr(dict):
def __init__(self, name):
self.name = name
def __repr__(self):
return "<%s>" % self.name
class SymmetricParams:
def __init__(self):
self.eigs = eigsh
self.which = ['LM', 'SM', 'LA', 'SA', 'BE']
self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
self.sigmas_modes = {None: ['normal'],
0.5: ['normal', 'buckling', 'cayley']}
# generate matrices
# these should all be float32 so that the eigenvalues
# are the same in float32 and float64
N = 6
np.random.seed(2300)
Ar = generate_matrix(N, hermitian=True,
pos_definite=True).astype('f').astype('d')
M = generate_matrix(N, hermitian=True,
pos_definite=True).astype('f').astype('d')
Ac = generate_matrix(N, hermitian=True, pos_definite=True,
complex=True).astype('F').astype('D')
v0 = np.random.random(N)
# standard symmetric problem
SS = DictWithRepr("std-symmetric")
SS['mat'] = Ar
SS['v0'] = v0
SS['eval'] = eigh(SS['mat'], eigvals_only=True)
# general symmetric problem
GS = DictWithRepr("gen-symmetric")
GS['mat'] = Ar
GS['bmat'] = M
GS['v0'] = v0
GS['eval'] = eigh(GS['mat'], GS['bmat'], eigvals_only=True)
# standard hermitian problem
SH = DictWithRepr("std-hermitian")
SH['mat'] = Ac
SH['v0'] = v0
SH['eval'] = eigh(SH['mat'], eigvals_only=True)
# general hermitian problem
GH = DictWithRepr("gen-hermitian")
GH['mat'] = Ac
GH['bmat'] = M
GH['v0'] = v0
GH['eval'] = eigh(GH['mat'], GH['bmat'], eigvals_only=True)
self.real_test_cases = [SS, GS]
self.complex_test_cases = [SH, GH]
class NonSymmetricParams:
def __init__(self):
self.eigs = eigs
self.which = ['LM', 'LR', 'LI'] # , 'SM', 'LR', 'SR', 'LI', 'SI']
self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
self.sigmas_OPparts = {None: [None],
0.1: ['r'],
0.1 + 0.1j: ['r', 'i']}
# generate matrices
# these should all be float32 so that the eigenvalues
# are the same in float32 and float64
N = 6
np.random.seed(2300)
Ar = generate_matrix(N).astype('f').astype('d')
M = generate_matrix(N, hermitian=True,
pos_definite=True).astype('f').astype('d')
Ac = generate_matrix(N, complex=True).astype('F').astype('D')
v0 = np.random.random(N)
# standard real nonsymmetric problem
SNR = DictWithRepr("std-real-nonsym")
SNR['mat'] = Ar
SNR['v0'] = v0
SNR['eval'] = eig(SNR['mat'], left=False, right=False)
# general real nonsymmetric problem
GNR = DictWithRepr("gen-real-nonsym")
GNR['mat'] = Ar
GNR['bmat'] = M
GNR['v0'] = v0
GNR['eval'] = eig(GNR['mat'], GNR['bmat'], left=False, right=False)
# standard complex nonsymmetric problem
SNC = DictWithRepr("std-cmplx-nonsym")
SNC['mat'] = Ac
SNC['v0'] = v0
SNC['eval'] = eig(SNC['mat'], left=False, right=False)
# general complex nonsymmetric problem
GNC = DictWithRepr("gen-cmplx-nonsym")
GNC['mat'] = Ac
GNC['bmat'] = M
GNC['v0'] = v0
GNC['eval'] = eig(GNC['mat'], GNC['bmat'], left=False, right=False)
self.real_test_cases = [SNR, GNR]
self.complex_test_cases = [SNC, GNC]
def test_symmetric_modes():
params = SymmetricParams()
k = 2
symmetric = True
for D in params.real_test_cases:
for typ in 'fd':
for which in params.which:
for mattype in params.mattypes:
for (sigma, modes) in params.sigmas_modes.items():
for mode in modes:
yield (eval_evec, symmetric, D, typ, k, which,
None, sigma, mattype, None, mode)
def test_hermitian_modes():
params = SymmetricParams()
k = 2
symmetric = True
for D in params.complex_test_cases:
for typ in 'FD':
for which in params.which:
if which == 'BE':
continue # BE invalid for complex
for mattype in params.mattypes:
for sigma in params.sigmas_modes:
yield (eval_evec, symmetric, D, typ, k, which,
None, sigma, mattype)
def test_symmetric_starting_vector():
params = SymmetricParams()
symmetric = True
for k in [1, 2, 3, 4, 5]:
for D in params.real_test_cases:
for typ in 'fd':
v0 = random.rand(len(D['v0'])).astype(typ)
yield (eval_evec, symmetric, D, typ, k, 'LM', v0)
def test_symmetric_no_convergence():
np.random.seed(1234)
m = generate_matrix(30, hermitian=True, pos_definite=True)
tol, rtol, atol = _get_test_tolerance('d')
try:
w, v = eigsh(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol)
raise AssertionError("Spurious no-error exit")
except ArpackNoConvergence as err:
k = len(err.eigenvalues)
if k <= 0:
raise AssertionError("Spurious no-eigenvalues-found case")
w, v = err.eigenvalues, err.eigenvectors
assert_allclose(dot(m, v), w * v, rtol=rtol, atol=atol)
def test_real_nonsymmetric_modes():
params = NonSymmetricParams()
k = 2
symmetric = False
for D in params.real_test_cases:
for typ in 'fd':
for which in params.which:
for mattype in params.mattypes:
for sigma, OPparts in params.sigmas_OPparts.items():
for OPpart in OPparts:
yield (eval_evec, symmetric, D, typ, k, which,
None, sigma, mattype, OPpart)
def test_complex_nonsymmetric_modes():
params = NonSymmetricParams()
k = 2
symmetric = False
for D in params.complex_test_cases:
for typ in 'DF':
for which in params.which:
for mattype in params.mattypes:
for sigma in params.sigmas_OPparts:
yield (eval_evec, symmetric, D, typ, k, which,
None, sigma, mattype)
def test_standard_nonsymmetric_starting_vector():
params = NonSymmetricParams()
sigma = None
symmetric = False
for k in [1, 2, 3, 4]:
for d in params.complex_test_cases:
for typ in 'FD':
A = d['mat']
n = A.shape[0]
v0 = random.rand(n).astype(typ)
yield (eval_evec, symmetric, d, typ, k, "LM", v0, sigma)
def test_general_nonsymmetric_starting_vector():
params = NonSymmetricParams()
sigma = None
symmetric = False
for k in [1, 2, 3, 4]:
for d in params.complex_test_cases:
for typ in 'FD':
A = d['mat']
n = A.shape[0]
v0 = random.rand(n).astype(typ)
yield (eval_evec, symmetric, d, typ, k, "LM", v0, sigma)
def test_standard_nonsymmetric_no_convergence():
np.random.seed(1234)
m = generate_matrix(30, complex=True)
tol, rtol, atol = _get_test_tolerance('d')
try:
w, v = eigs(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol)
raise AssertionError("Spurious no-error exit")
except ArpackNoConvergence as err:
k = len(err.eigenvalues)
if k <= 0:
raise AssertionError("Spurious no-eigenvalues-found case")
w, v = err.eigenvalues, err.eigenvectors
for ww, vv in zip(w, v.T):
assert_allclose(dot(m, vv), ww * vv, rtol=rtol, atol=atol)
def test_eigen_bad_shapes():
# A is not square.
A = csc_matrix(np.zeros((2, 3)))
assert_raises(ValueError, eigs, A)
def test_eigen_bad_kwargs():
# Test eigen on wrong keyword argument
A = csc_matrix(np.zeros((2, 2)))
assert_raises(ValueError, eigs, A, which='XX')
def test_ticket_1459_arpack_crash():
for dtype in [np.float32, np.float64]:
# XXX: this test does not seem to catch the issue for float32,
# but we made the same fix there, just to be sure
N = 6
k = 2
np.random.seed(2301)
A = np.random.random((N, N)).astype(dtype)
v0 = np.array([-0.71063568258907849895, -0.83185111795729227424,
-0.34365925382227402451, 0.46122533684552280420,
-0.58001341115969040629, -0.78844877570084292984e-01],
dtype=dtype)
# Should not crash:
evals, evecs = eigs(A, k, v0=v0)
#----------------------------------------------------------------------
# sparse SVD tests
def sorted_svd(m, k, which='LM'):
# Compute svd of a dense matrix m, and return singular vectors/values
# sorted.
if isspmatrix(m):
m = m.todense()
u, s, vh = svd(m)
if which == 'LM':
ii = np.argsort(s)[-k:]
elif which == 'SM':
ii = np.argsort(s)[:k]
else:
raise ValueError("unknown which=%r" % (which,))
return u[:, ii], s[ii], vh[ii]
def svd_estimate(u, s, vh):
return np.dot(u, np.dot(np.diag(s), vh))
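# Quick sketch (synthetic matrix, not part of the original test suite):
# `sorted_svd` + `svd_estimate` give the best rank-k approximation, so the
# reconstruction error shrinks as k grows.
def _example_svd_estimate():
    rng = np.random.RandomState(42)
    m = rng.rand(6, 5)
    for k in (1, 2, 4):
        u, s, vh = sorted_svd(m, k)
        print(k, np.linalg.norm(m - svd_estimate(u, s, vh)))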
def test_svd_simple_real():
x = np.array([[1, 2, 3],
[3, 4, 3],
[1, 0, 2],
                  [0, 0, 1]], np.float64)
y = np.array([[1, 2, 3, 8],
[3, 4, 3, 5],
[1, 0, 2, 3],
                  [0, 0, 1, 0]], np.float64)
z = csc_matrix(x)
for m in [x.T, x, y, z, z.T]:
for k in range(1, min(m.shape)):
u, s, vh = sorted_svd(m, k)
su, ss, svh = svds(m, k)
m_hat = svd_estimate(u, s, vh)
sm_hat = svd_estimate(su, ss, svh)
assert_array_almost_equal_nulp(m_hat, sm_hat, nulp=1000)
def test_svd_simple_complex():
x = np.array([[1, 2, 3],
[3, 4, 3],
[1 + 1j, 0, 2],
                  [0, 0, 1]], np.complex128)
y = np.array([[1, 2, 3, 8 + 5j],
[3 - 2j, 4, 3, 5],
[1, 0, 2, 3],
                  [0, 0, 1, 0]], np.complex128)
z = csc_matrix(x)
for m in [x, x.T.conjugate(), x.T, y, y.conjugate(), z, z.T]:
for k in range(1, min(m.shape) - 1):
u, s, vh = sorted_svd(m, k)
su, ss, svh = svds(m, k)
m_hat = svd_estimate(u, s, vh)
sm_hat = svd_estimate(su, ss, svh)
assert_array_almost_equal_nulp(m_hat, sm_hat, nulp=1000)
def test_svd_maxiter():
# check that maxiter works as expected
x = hilbert(6)
# ARPACK shouldn't converge on such an ill-conditioned matrix with just
# one iteration
assert_raises(ArpackNoConvergence, svds, x, 1, maxiter=1)
# but 100 iterations should be more than enough
u, s, vt = svds(x, 1, maxiter=100)
assert_allclose(s, [1.7], atol=0.5)
def test_svd_return():
# check that the return_singular_vectors parameter works as expected
x = hilbert(6)
_, s, _ = sorted_svd(x, 2)
ss = svds(x, 2, return_singular_vectors=False)
assert_allclose(s, ss)
def test_svd_which():
# check that the which parameter works as expected
x = hilbert(6)
for which in ['LM', 'SM']:
_, s, _ = sorted_svd(x, 2, which=which)
ss = svds(x, 2, which=which, return_singular_vectors=False)
ss.sort()
assert_allclose(s, ss, atol=np.sqrt(1e-15))
def test_svd_v0():
# check that the v0 parameter works as expected
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], float)
u, s, vh = svds(x, 1)
u2, s2, vh2 = svds(x, 1, v0=u[:,0])
assert_allclose(s, s2, atol=np.sqrt(1e-15))
def _check_svds(A, k, U, s, VH):
n, m = A.shape
# Check shapes.
assert_equal(U.shape, (n, k))
assert_equal(s.shape, (k,))
assert_equal(VH.shape, (k, m))
# Check that the original matrix can be reconstituted.
A_rebuilt = (U*s).dot(VH)
assert_equal(A_rebuilt.shape, A.shape)
assert_allclose(A_rebuilt, A)
# Check that U is a semi-orthogonal matrix.
UH_U = np.dot(U.T.conj(), U)
assert_equal(UH_U.shape, (k, k))
assert_allclose(UH_U, np.identity(k), atol=1e-12)
# Check that V is a semi-orthogonal matrix.
VH_V = np.dot(VH, VH.T.conj())
assert_equal(VH_V.shape, (k, k))
assert_allclose(VH_V, np.identity(k), atol=1e-12)
def test_svd_LM_ones_matrix():
# Check that svds can deal with matrix_rank less than k in LM mode.
k = 3
for n, m in (6, 5), (5, 5), (5, 6):
for t in float, complex:
A = np.ones((n, m), dtype=t)
U, s, VH = svds(A, k)
# Check some generic properties of svd.
_check_svds(A, k, U, s, VH)
# Check that the largest singular value is near sqrt(n*m)
# and the other singular values have been forced to zero.
assert_allclose(np.max(s), np.sqrt(n*m))
assert_array_equal(sorted(s)[:-1], 0)
def test_svd_LM_zeros_matrix():
# Check that svds can deal with matrices containing only zeros.
k = 1
for n, m in (3, 4), (4, 4), (4, 3):
for t in float, complex:
A = np.zeros((n, m), dtype=t)
U, s, VH = svds(A, k)
# Check some generic properties of svd.
_check_svds(A, k, U, s, VH)
# Check that the singular values are zero.
assert_array_equal(s, 0)
def test_svd_LM_zeros_matrix_gh_3452():
# Regression test for a github issue.
# https://github.com/scipy/scipy/issues/3452
    # Note that for complex dtype the size of this matrix is too small for k=1.
n, m, k = 4, 2, 1
A = np.zeros((n, m))
U, s, VH = svds(A, k)
# Check some generic properties of svd.
_check_svds(A, k, U, s, VH)
# Check that the singular values are zero.
assert_array_equal(s, 0)
class CheckingLinearOperator(LinearOperator):
def __init__(self, A):
self.A = A
self.dtype = A.dtype
self.shape = A.shape
def matvec(self, x):
assert_equal(max(x.shape), np.size(x))
return self.A.dot(x)
def rmatvec(self, x):
        assert_equal(max(x.shape), np.size(x))
        # plausible completion of the truncated original: the adjoint matvec
        return self.A.T.conjugate().dot(x)
# -*- coding: utf-8 -*-
# Copyright (c) 2020. Distributed under the terms of the MIT License.
from typing import List, Tuple
import numpy as np
from pydefect.defaults import defaults
from pydefect.input_maker.defect import SimpleDefect
from pydefect.input_maker.defect_entry import DefectEntry
from pydefect.input_maker.defect_set import DefectSet
from pydefect.input_maker.supercell_info import SupercellInfo
from pymatgen import Structure, IStructure
class DefectEntriesMaker:
def __init__(self, supercell_info: SupercellInfo, defect_set: DefectSet):
self.supercell_info = supercell_info
self.defect_entries = set()
for defect in defect_set:
structure, perturbed_structure, coords, site_symmetry = \
self._create_defect_structures(defect)
for charge in defect.charges:
self.defect_entries.add(
DefectEntry(name=defect.name,
charge=charge,
structure=structure,
perturbed_structure=perturbed_structure,
site_symmetry=site_symmetry,
defect_center=coords))
def _create_defect_structures(self,
defect: SimpleDefect,
) -> Tuple[IStructure, IStructure,
Tuple[float, float, float], str]:
structure = copy_to_structure(self.supercell_info.structure)
        if defect.out_atom[0] == "i":
index = int(defect.out_atom[1:]) - 1
site = self.supercell_info.interstitials[index]
cutoff = self.supercell_info.interstitial_coords(index).cutoff
coords = self.supercell_info.interstitials[index].frac_coords
else:
site = self.supercell_info.sites[defect.out_atom]
cutoff = self.supercell_info.coords(defect.out_atom).cutoff
removed_site_index = site.equivalent_atoms[0]
coords = structure.pop(removed_site_index).frac_coords
perturbed_structure = perturb_structure(structure, coords, cutoff)
if defect.in_atom:
add_atom_to_structure(structure, defect.in_atom, coords)
add_atom_to_structure(perturbed_structure, defect.in_atom, coords)
return to_istructure(structure), to_istructure(perturbed_structure), \
tuple(coords), site.site_symmetry
def copy_to_structure(structure: IStructure) -> Structure:
return Structure.from_dict(structure.as_dict())
def to_istructure(structure: Structure) -> IStructure:
return IStructure.from_dict(structure.as_dict())
def add_atom_to_structure(structure: Structure, elem: str, coords: List[float]):
"""In-place atom insertion to structure
https://stackoverflow.com/questions/2361426/get-the-first-item-from-an-iterable-that-matches-a-condition
"""
try:
idx = next(i for i, s in enumerate(structure) if str(s.specie) == elem)
except StopIteration:
idx = len(structure)
structure.insert(idx, elem, coords)
def perturb_structure(structure: Structure, center: List[float], cutoff: float
) -> Structure:
""" structure perturbation
Args:
structure: pmg Structure class object
center: Fractional coordinates of a central position.
cutoff: Radius of a sphere in which atoms are perturbed.
"""
result = structure.copy()
cartesian_coords = structure.lattice.get_cartesian_coords(center)
neighboring_atoms = structure.get_sites_in_sphere(
pt=cartesian_coords, r=cutoff, include_index=True)
# assert cutoff < min(structure.lattice.lengths) / 2.0
# neighboring_atom is composed of (PeriodicSite, distance, index)
for _, _, site_index in neighboring_atoms:
vector = random_3d_vector(defaults.displace_distance)
result.translate_sites(site_index, vector, frac_coords=False)
return result
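# Hypothetical usage sketch (structure invented for illustration): perturb
# every atom of a rocksalt-like cell lying within 3 A of the origin.
#
#   from pymatgen import Lattice, Structure
#   s = Structure(Lattice.cubic(4.0), ["Na", "Cl"],
#                 [[0, 0, 0], [0.5, 0.5, 0.5]])
#   perturbed = perturb_structure(s, center=[0, 0, 0], cutoff=3.0)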
def random_3d_vector(distance):
"""Random 3d vector with uniform spherical distribution with 0 <= norm <= 1.
stackoverflow.com/questions/5408276/python-uniform-spherical-distribution
"""
phi = np.random.uniform(0, np.pi * 2)
cos_theta = np.random.uniform(-1, 1)
    theta = np.arccos(cos_theta)
    # plausible completion of the truncated original: a uniformly random
    # direction, scaled by a random magnitude in [0, distance]
    x = np.sin(theta) * np.cos(phi)
    y = np.sin(theta) * np.sin(phi)
    z = np.cos(theta)
    return np.array([x, y, z]) * np.random.uniform(0, distance)
# Copyright 2021 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Metrics."""
import math
import note_seq
import numpy as np
import scipy
from sklearn import metrics
def frechet_distance(real, fake, eps=1e-6):
"""Frechet distance.
Lower score is better.
"""
mu1, sigma1 = np.mean(real, axis=0), np.cov(real, rowvar=False)
mu2, sigma2 = np.mean(fake, axis=0), np.cov(fake, rowvar=False)
diff = mu1 - mu2
covmean, _ = scipy.linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = ('fid calculation produces singular product; '
'adding %s to diagonal of cov estimates') % eps
print(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = scipy.linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# Numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError('Imaginary component {}'.format(m))
covmean = covmean.real
assert np.isfinite(covmean).all() and not np.iscomplexobj(covmean)
tr_covmean = np.trace(covmean)
frechet_dist = diff.dot(diff)
frechet_dist += np.trace(sigma1) + np.trace(sigma2)
frechet_dist -= 2 * tr_covmean
return frechet_dist
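# Minimal usage sketch (synthetic feature matrices, not model embeddings):
# identical distributions give a distance near zero; a mean shift of the
# "fake" features inflates it.
def _example_frechet_distance():
  rng = np.random.RandomState(0)
  real = rng.randn(2000, 8)
  print(frechet_distance(real, rng.randn(2000, 8)))        # ~0
  print(frechet_distance(real, rng.randn(2000, 8) + 1.0))  # ~8 (= ||diff||^2)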
def mmd_rbf(real, fake, gamma=1.0):
"""(RBF) kernel distance.
Lower score is better.
"""
XX = metrics.pairwise.rbf_kernel(real, real, gamma)
YY = metrics.pairwise.rbf_kernel(fake, fake, gamma)
XY = metrics.pairwise.rbf_kernel(real, fake, gamma)
return XX.mean() + YY.mean() - 2 * XY.mean()
def mmd_polynomial(real, fake, degree=2, gamma=1, coef0=0):
"""(Polynomial) kernel distance.
Lower score is better.
"""
XX = metrics.pairwise.polynomial_kernel(real, real, degree, gamma, coef0)
YY = metrics.pairwise.polynomial_kernel(fake, fake, degree, gamma, coef0)
XY = metrics.pairwise.polynomial_kernel(real, fake, degree, gamma, coef0)
return XX.mean() + YY.mean() - 2 * XY.mean()
def framewise_statistic(ns, stat_fn, hop_size=1, frame_size=1):
"""Computes framewise MIDI statistic."""
total_time = int(math.ceil(ns.total_time))
frames = []
trim = frame_size - hop_size
for i in range(0, total_time - trim, hop_size):
one_sec_chunk = note_seq.sequences_lib.trim_note_sequence(
ns, i, i + frame_size)
value = stat_fn(one_sec_chunk.notes)
frames.append(value)
return np.array(frames)
def note_density(ns, hop_size=1, frame_size=1):
stat_fn = lambda notes: len(notes)
return framewise_statistic(ns,
stat_fn,
hop_size=hop_size,
frame_size=frame_size)
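# Hypothetical sketch (tiny NoteSequence built by hand): one note per
# one-second frame yields a density of 1 in each frame.
#
#   ns = note_seq.NoteSequence(total_time=2.0)
#   ns.notes.add(pitch=60, velocity=80, start_time=0.0, end_time=0.5)
#   ns.notes.add(pitch=64, velocity=80, start_time=1.2, end_time=1.8)
#   print(note_density(ns))  # -> array([1, 1])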
def pitch_range(ns, hop_size=1, frame_size=1):
def stat_fn(notes):
pitches = [note.pitch for note in notes]
return max(pitches) - min(pitches) if len(pitches) > 0 else 0
return framewise_statistic(ns,
stat_fn,
hop_size=hop_size,
frame_size=frame_size)
def mean_pitch(ns, hop_size=1, frame_size=1):
def stat_fn(notes):
pitches = np.array([note.pitch for note in notes])
return pitches.mean() if len(pitches) > 0 else 0
return framewise_statistic(ns,
stat_fn,
hop_size=hop_size,
frame_size=frame_size)
def var_pitch(ns, hop_size=1, frame_size=1):
def stat_fn(notes):
pitches = np.array([note.pitch for note in notes])
return pitches.var() if len(pitches) > 0 else 0
return framewise_statistic(ns,
stat_fn,
hop_size=hop_size,
frame_size=frame_size)
def mean_note_duration(ns, hop_size=1, frame_size=1):
def stat_fn(notes):
durations = np.array([note.end_time - note.start_time for note in notes])
return durations.mean() if len(durations) > 0 else 0
return framewise_statistic(ns,
stat_fn,
hop_size=hop_size,
frame_size=frame_size)
def var_note_duration(ns, hop_size=1, frame_size=1):
def stat_fn(notes):
durations = np.array([note.end_time - note.start_time for note in notes])
return durations.var() if len(durations) > 0 else 0
return framewise_statistic(ns,
stat_fn,
hop_size=hop_size,
frame_size=frame_size)
def perceptual_midi_histograms(ns, interval=1):
"""Generates histograms for each MIDI feature."""
  return dict(
      nd=note_density(ns, hop_size=interval, frame_size=interval),
      pr=pitch_range(ns, hop_size=interval, frame_size=interval),
      mp=mean_pitch(ns, hop_size=interval, frame_size=interval),
      vp=var_pitch(ns, hop_size=interval, frame_size=interval),
      md=mean_note_duration(ns, hop_size=interval, frame_size=interval),
      vd=var_note_duration(ns, hop_size=interval, frame_size=interval),
  )
def perceptual_midi_statistics(ns, interval=1, vector=False):
"""Feature vector of means and variances of MIDI histograms.
Args:
ns: NoteSequence object.
interval: Integer time interval (in seconds) for each histogram bin.
vector: If True, returns statistics as a feature vector.
"""
features = {}
histograms = perceptual_midi_histograms(ns, interval=interval)
for key in histograms:
mu = histograms[key].mean()
var = histograms[key].var()
features[key] = (mu, var)
if vector:
vec = np.array(list(features.values()))
return vec.reshape(-1)
return features
def perceptual_similarity(ns1, ns2, interval=1):
"""Perceptual similarity as determined by Overlapping Area Metric.
Determines pairwise similarity for two NoteSequence objects.
Args:
ns1: NoteSequence object.
ns2: NoteSequence object.
interval: Integer time interval (in seconds) for each histogram bin.
"""
stats1 = perceptual_midi_statistics(ns1, interval, vector=False)
stats2 = perceptual_midi_statistics(ns2, interval, vector=False)
similarity = {}
for key in stats1:
mu1, var1 = stats1[key]
mu2, var2 = stats2[key]
similarity[key] = overlapping_area(mu1, mu2, var1, var2)
return similarity
def overlapping_area(mu1, mu2, var1, var2):
"""Compute overlapping area of two Gaussians.
Args:
mu1: Mean of first Gaussian pdf.
mu2: Mean of second Gaussian pdf.
var1: Variance of first Gaussian pdf.
var2: Variance of second Gaussian pdf.
Returns:
Overlapping area of the two density functions.
"""
idx = mu2 < mu1
mu_a = mu2 * idx + np.logical_not(idx) * mu1
  mu_b = mu1 * idx + np.logical_not(idx) * mu2
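# The closed-form computation above is truncated in this source; as an
# independent numerical sketch (not the original implementation), the
# overlapping area can be estimated by integrating the pointwise minimum of
# the two densities.
def _overlapping_area_numeric(mu1, mu2, var1, var2, n=10001):
  from scipy import stats as _stats
  lo = min(mu1, mu2) - 6 * math.sqrt(max(var1, var2))
  hi = max(mu1, mu2) + 6 * math.sqrt(max(var1, var2))
  x = np.linspace(lo, hi, n)
  p = _stats.norm.pdf(x, loc=mu1, scale=math.sqrt(var1))
  q = _stats.norm.pdf(x, loc=mu2, scale=math.sqrt(var2))
  return np.trapz(np.minimum(p, q), x)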