# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import configparser
import multiprocessing
from pathlib import Path
import tarfile
import tempfile
import numpy as np
import torch # pytype: disable=import-error
import yaml
def get_weight_data_type(data_type):
if data_type == "fp32":
return np.float32
elif data_type == "fp16":
return np.float16
else:
assert False, f"Invalid weight data type {data_type}"
def unpack_nemo_ckpt(nemo_ckpt_path, out_folder):
"""
    .nemo file is an archive (tar.gz) with the following contents:
    model_config.yaml - model configuration in .yaml format. You can deserialize this into the cfg argument for the model's constructor.
    model_weights.ckpt - model checkpoint
"""
if not os.path.exists(nemo_ckpt_path):
raise FileNotFoundError(f"{nemo_ckpt_path} does not exist")
tar = tarfile.open(nemo_ckpt_path, "r:gz")
tar.extractall(path=out_folder)
tar.close()
return out_folder
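# Illustrative usage sketch (added; not part of the original script). The
# checkpoint filename below is a placeholder, not a real checkpoint.
def _demo_unpack_nemo_ckpt():
    with tempfile.TemporaryDirectory() as tmp_dir:
        unpack_nemo_ckpt("megatron_gpt.nemo", tmp_dir)
        # model_config.yaml can be deserialized into the model constructor's cfg
        with open(os.path.join(tmp_dir, "model_config.yaml")) as f:
            return yaml.safe_load(f)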
def _cpu_map_location(storage, loc):
return storage.cpu()
def _gpu_map_location(storage, loc):
if loc.startswith("cuda"):
training_gpu_idx = int(loc.split(":")[1])
inference_gpu_idx = training_gpu_idx % torch.cuda.device_count()
return storage.cuda(inference_gpu_idx)
elif loc.startswith("cpu"):
return storage.cpu()
else:
raise NotImplementedError(f"Not handled {loc}")
# Merge from more to fewer GPUs, e.g., trained on 8 GPUs, inference on 2 GPUs
def merge_and_convert(
args, model_config, weight_files, *, load_checkpoints_to_cpu: bool = False
): # noqa: C901 too complex
saved_dir = Path(args.saved_dir)
if args.fused_qkv == 1:
saved_dir = saved_dir / f"{args.infer_gpu_num:d}-gpu/"
else:
saved_dir = saved_dir / f"unfusedQKV-{args.infer_gpu_num:d}-gpu"
saved_dir.mkdir(parents=True, exist_ok=True)
config = configparser.ConfigParser()
config["gpt"] = {}
try:
for key in vars(args):
config["gpt"][key] = f"{vars(args)[key]}"
for k, v in model_config.items():
config["gpt"][k] = f"{v}"
config["gpt"]["weight_data_type"] = args.weight_data_type
        with open((saved_dir / "config.ini").as_posix(), 'w') as configfile:
            config.write(configfile)
    except Exception:
        print("Failed to save the config in config.ini.")
np_weight_data_type = get_weight_data_type(args.weight_data_type)
prefix = Path(args.in_file)
i_gpu_num = args.infer_gpu_num
t_gpu_num = model_config["tensor_model_parallel_size"]
num_attention_heads = model_config["num_attention_heads"]
assert t_gpu_num % i_gpu_num == 0
factor = int(t_gpu_num / i_gpu_num)
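    # Worked example (added note): a model trained with tensor_model_parallel_size=8
    # converted for infer_gpu_num=2 gives factor=4, so each inference rank merges
    # 4 consecutive training shards (ranks 0-3 and 4-7).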
num_checkpoints_per_convert = max(factor, 1)
if num_checkpoints_per_convert > torch.cuda.device_count():
print(
f"[WARNING] Need to load #{num_checkpoints_per_convert} checkpoints at once "
f"while having {torch.cuda.device_count()} GPUs. Force load checkpoints on CPU"
)
load_checkpoints_to_cpu = True
map_location_fn = _cpu_map_location if load_checkpoints_to_cpu else _gpu_map_location
# load position_embedding from rank 0
model_00 = torch.load(weight_files[0], map_location=map_location_fn)
model_00["model.language_model.embedding.position_embeddings.weight"].float().cpu().numpy().astype(
np_weight_data_type
).tofile(
(saved_dir / "model.wpe.bin").as_posix()
) # not weight, do not need transpose
del model_00
w_e_list = []
for i in range(i_gpu_num):
transformer_models = []
for j in range(factor):
model = torch.load(weight_files[i * factor + j], map_location=map_location_fn)
w_e_list.append(
model["model.language_model.embedding.word_embeddings.weight"]
.float()
.cpu()
.numpy()
.astype(np_weight_data_type)
)
prefix = "model.language_model.encoder"
model["model"] = {}
model["model"]["language_model"] = {}
model["model"]["language_model"]["encoder"] = {}
model["model"]["language_model"]["embedding"] = {}
model["model"]["language_model"]["embedding"]["word_embeddings"] = {}
model["model"]["language_model"]["embedding"]["position_embeddings"] = {}
model["model"]["language_model"]["embedding"]["word_embeddings"]["weight"] = model[
"model.language_model.embedding.word_embeddings.weight"]
model["model"]["language_model"]["embedding"]["position_embeddings"]["weight"] = model[
"model.language_model.embedding.position_embeddings.weight"]
            for key in model.keys():
                if prefix in key:
                    suffix = key[len(prefix) + 1:]
                    model["model"]["language_model"]["encoder"][suffix] = model[key]
# print(model["model"]["language_model"]["encoder"].keys())
# this model should be able to load into megatron
# torch.save(model, "model.pt")
transformer_models.append(model["model"]["language_model"]["encoder"])
for key in transformer_models[0]:
if (
key.find("input_layernorm.weight") != -1
or key.find("input_layernorm.bias") != -1
or key.find("attention.dense.bias") != -1
or key.find("post_attention_layernorm.weight") != -1
or key.find("post_attention_layernorm.bias") != -1
or key.find("mlp.dense_4h_to_h.bias") != -1
or key.find("final_layernorm.weight") != -1
or key.find("final_layernorm.bias") != -1
):
# shared weights, only need to convert the weights of rank 0
if i == 0:
val = transformer_models[0][key].T.float().cpu().numpy()
key = key.replace("self_attention", "attention")
saved_path = saved_dir / f"model.{key}.bin"
np.squeeze(val).astype(np_weight_data_type).tofile(saved_path.as_posix())
elif key.find("attention.dense.weight") != -1 or key.find("mlp.dense_4h_to_h.weight") != -1:
vals = []
for k in range(factor):
vals.append(transformer_models[k][key].T.float().cpu().numpy())
key = key.replace("self_attention", "attention")
saved_path = saved_dir / f"model.{key}.{i}.bin"
np.concatenate(vals, axis=0).astype(np_weight_data_type).tofile(saved_path.as_posix())
elif key.find("mlp.dense_h_to_4h.weight") != -1 or key.find("mlp.dense_h_to_4h.bias") != -1:
vals = []
for k in range(factor):
vals.append(transformer_models[k][key].T.float().cpu().numpy())
saved_path = saved_dir / f"model.{key}.{i}.bin"
                np.concatenate(vals, axis=-1).astype(np_weight_data_type).tofile(saved_path.as_posix())
# ############################################################################
# linear_operator.py
# =======
# Authors : <NAME> [<EMAIL>] and <NAME> [<EMAIL>]
# ############################################################################
"""
Classes and routines for linear operators used in generalised FRI problems.
"""
import numpy as np
import time as t
from abc import abstractmethod
import scipy.sparse.linalg as spsparse
import numpy.linalg as nplin
import scipy.linalg as splin
from scipy.signal import convolve, choose_conv_method
from numbers import Number
from typing import Union
class AbstractLinearOperator(spsparse.LinearOperator):
"""
Base class for linear operators, inherited from scipy.sparse.linalg.LinearOperator.
"""
def __init__(self, dtype: type, shape: tuple):
super(AbstractLinearOperator, self).__init__(shape=shape, dtype=dtype)
@abstractmethod
def pinv(self, x: np.ndarray):
pass
def proj(self, x: np.ndarray):
"""
Orthogonal projection onto the range of the linear operator.
:param x: np.ndarray
Vector to be projected.
:return: np.ndarray
Projected vector.
"""
return self.matvec(self.pinv(x))
def proj_conjugate(self, x: np.ndarray, sigma: float):
if not isinstance(sigma, Number):
raise ValueError("Parameter sigma must be numeric.")
return x - sigma * self.proj(x / sigma)
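# Note (added): proj_conjugate applies the Moreau decomposition
# prox_{sigma f*}(x) = x - sigma * prox_{f/sigma}(x / sigma); with f the
# indicator of the operator's range the inner prox is the orthogonal
# projection `proj`, giving x - sigma * proj(x / sigma).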
class LinearOperatorFromMatrix(AbstractLinearOperator):
"""
Class for linear operators defined from matrices.
:attribute mat: np.ndarray
Matrix representation of the linear operator.
:attribute adjoint: np.ndarray
Conjugate transpose of `mat`.
:attribute gram: np.ndarray
Gram matrix adjoint @ mat
:attribute norm, lipschitz_cst: float
Spectral norm of operator.
"""
def __init__(self, mat: np.ndarray):
"""
        Initialize an object of this class.
:param mat: np.ndarray[L,N]
Matrix representation of the linear operator.
"""
# Check mat
try:
mat = np.asarray(mat)
        except ValueError:
            raise ValueError("Input matrix must be a numpy array.")
# Init from super class
super(LinearOperatorFromMatrix, self).__init__(shape=mat.shape, dtype=mat.dtype)
# Matrix corresponding to the linear operator
self.mat = mat
# Adjoint
self.adjoint = mat.conj().transpose()
# Corresponding Gram matrix
self.gram = self.adjoint @ mat
# Spectral norm, Lipschitz constant
self.norm = self.lipschitz_cst = np.sqrt(
spsparse.eigs(self.gram, k=1, which='LM', return_eigenvectors=False, maxiter=int(5e4)))
def _matvec(self, x: np.ndarray):
"""
Matrix/vector product.
:param x: np.ndarray[N,]
Vector.
:return: np.ndarray[L,]
Vector resulting from matrix/vector product.
"""
M, N = self.shape
if x.shape != (N,) and x.shape != (N, 1):
raise ValueError('dimension mismatch')
return self.mat @ x
def _rmatvec(self, x: np.ndarray):
"""
Adjoint matrix/vector product.
:param x: np.ndarray[L,]
Vector.
:return: np.ndarray[N,]
Vector resulting from the adjoint matrix/vector product.
"""
M, N = self.shape
if x.shape != (M,) and x.shape != (M, 1):
raise ValueError('dimension mismatch')
return self.adjoint @ x
def pinv(self, x: np.ndarray, rcond: float = 1e-9):
"""
Evaluate the pseudo-inverse of the linear operator for a vector x.
:param x: np.ndarray[L,]
Vector.
:param rcond:
Cutoff for eigenvalues in `np.linalg.pinv`.
:return: np.ndarray[N,]
"""
M, N = self.shape
if x.shape != (M,) and x.shape != (M, 1):
raise ValueError('dimension mismatch')
inv_mat = np.linalg.pinv(self.mat, rcond=rcond)
return inv_mat @ x
class Id(LinearOperatorFromMatrix):
"""
Class for identity operator inherited from `LinearOperatorFromMatrix`.
"""
def __init__(self, n: int):
super(Id, self).__init__(mat=np.eye(n))
class ToeplitzificationOperator(AbstractLinearOperator):
"""
Class for Toeplitzification operator, inherited from `AbstractLinearOperator`.
:attribute P: int
Parameter P in [Section II.A,1].
:attribute M: int
Parameter M in [Section II.A,1].
:attribute N: int
Parameter N=2*M+1 in [Section II.A,1].
:attribute norm: float
Spectral norm of linear operator.
:attribute gram: np.ndarray
Diagonal Gram matrix stored as 1D array.
Reference: Section II.A of
[1] <NAME>., <NAME>., <NAME>. & <NAME>. (2020). Cadzow Plug-and-Play Gradient Descent for Generalised FRI.
Under review.
"""
def __init__(self, P: int, M: int, dtype: type = np.complex128):
"""
        Initialise the Toeplitzification operator with parameter P acting on vectors of size N=2*M+1.
:param P: int,
:param M: int.
:param dtype: type
Type of the entries of the linear operator.
"""
        # Check P
        try:
            P = int(P)
        except (TypeError, ValueError):
            raise ValueError("P must be a number.")
        # Check M
        try:
            M = int(M)
        except (TypeError, ValueError):
            raise ValueError("M must be a number.")
self.P = P
self.M = M
self.N = 2 * M + 1
self.__offsets = -(np.arange(1, self.N + 1) - 1 - self.P)
# Init from super class
shape = ((self.N - self.P) * (self.P + 1), self.N)
super(ToeplitzificationOperator, self).__init__(shape=shape, dtype=dtype)
        self.norm = np.sqrt(self.P + 1)
import sys
from typing import List, Tuple
import numpy as np
import pandas as pd
def get_valid_gene_info(
genes: List[str],
release=102,
species='homo sapiens'
) -> Tuple[List[str], List[int], List[int], List[int]]:
"""Returns gene locations for all genes in ensembl release 93 --S Markson 3 June 2020
Parameters
----------
genes : A list of genes
genes : List[str] :
genes : List[str] :
genes : List[str] :
genes : List[str] :
genes : List[str] :
genes : List[str] :
genes : List[str] :
genes: List[str] :
Returns
-------
"""
from pyensembl import EnsemblRelease
assembly = EnsemblRelease(release, species=species)
gene_names = []
gene_contigs = []
gene_starts = []
gene_ends = []
for gene in np.intersect1d(genes, [
gene.gene_name for gene in assembly.genes()
if gene.contig.isnumeric() or gene.contig == 'X'
    ]):  # Toss genes not on a numeric contig or X
gene_info = assembly.genes_by_name(gene)
gene_info = gene_info[0]
gene_names.append(gene)
gene_contigs.append(gene_info.contig)
gene_starts.append(gene_info.start)
gene_ends.append(gene_info.end)
return gene_names, gene_contigs, gene_starts, gene_ends
def seurat_to_loom(seuratrds, patient_id_column, celltype_column,
complexity_column, loomfile):
"""
Parameters
----------
seuratrds :
patient_id_column :
celltype_column :
complexity_column :
loomfile :
Returns
-------
"""
import rpy2.robjects as robjects
from scipy import sparse
from rpy2.robjects import pandas2ri
import loompy
robjects.r('''
library(Seurat)
seurat2rawandmeta <- function(seuratrds) {
seuratobj <- readRDS(seuratrds)
    return(list(genes=rownames(seuratobj@data), metadata=seuratobj@meta.data, data=as.data.frame(summary(seuratobj@data))))
}
''')
seurat_grab = robjects.r['seurat2rawandmeta'](seuratrds)
genes = pd.DataFrame(np.array(seurat_grab.rx2('genes')))
genes.columns = ['gene']
metadata = pandas2ri.rpy2py_dataframe(seurat_grab.rx2('metadata'))
    if patient_id_column != 'patient_ID':
        metadata['patient_ID'] = metadata[patient_id_column]
        metadata.drop(patient_id_column, axis=1, inplace=True)
    if celltype_column != 'cell_type':
        metadata['cell_type'] = metadata[celltype_column]
        metadata.drop(celltype_column, axis=1, inplace=True)
    if complexity_column != 'complexity':
        metadata['complexity'] = metadata[complexity_column]
        metadata.drop(complexity_column, axis=1, inplace=True)
data_df = pandas2ri.rpy2py_dataframe(seurat_grab.rx2('data'))
sparsedata = sparse.coo_matrix(
(data_df['x'], (data_df['i'] - 1, data_df['j'] - 1))).tocsc()
sparsedata.resize((genes.shape[0], metadata.shape[0]))
loompy.create(loomfile, sparsedata, genes.to_dict("list"),
metadata.to_dict("list"))
def intify(df_init):
"""
Parameters
----------
df_init :
Returns
-------
"""
import binascii
df = df_init.copy()
for col in df.columns:
if col.endswith('_ad'):
            raise Exception(
                "Don't append your column names with _ad! -- Samuel")
df[col] = df[col].apply(
lambda x: int(binascii.hexlify(x.encode()), 16))
while np.sum(df.max() > sys.maxsize) > 0:
for col in df.columns:
if df[col].max() > sys.maxsize:
df[col + '_ad'] = df[col] // sys.maxsize
df[col] = df[col] % sys.maxsize
return df.astype(np.int64)
def deintify(df_init):
"""
Parameters
----------
df_init :
Returns
-------
"""
import binascii
df = df_init.copy()
while np.sum([x.endswith('_ad') for x in df.columns]) > 0:
for col in df.columns:
if col.endswith('_ad') and col + '_ad' not in df.columns:
df[col[0:-3]] = df[col[0:-3]].astype(object)
df[col] = df[col].astype(object)
df[col[0:-3]] = df[col[0:-3]] + sys.maxsize * df[col]
df.drop(col, axis=1, inplace=True)
for col in df.columns:
try:
df[col] = df[col].apply(
lambda x: binascii.unhexlify(hex(x)[2::].encode()).decode())
        except Exception:
            raise Exception(
                "Failed to decode column {} back to a string".format(col))
return df
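# Illustrative round-trip sketch (added; not in the original module): intify
# hex-packs string columns into int64-safe chunks and deintify restores them.
def _demo_intify_roundtrip():
    df = pd.DataFrame({'name': ['alpha', 'beta'], 'batch': ['b1', 'b2']})
    encoded = intify(df)          # integer-only frame, safe as loom attributes
    decoded = deintify(encoded)   # original strings recovered
    assert (decoded.values == df.values).all()
    return decoded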
def recover_meta(db, do_deint=False):
"""
Parameters
----------
db :
do_deint :
(Default value = False)
Returns
-------
"""
colmeta = None
for key in db.ca.keys():
if colmeta is None:
colmeta = pd.DataFrame(db.ca[key])
colmeta.columns = [key]
else:
colmeta[key] = db.ca[key]
if do_deint:
colmeta = deintify(colmeta.astype(np.int64))
rowmeta = None
for key in db.ra.keys():
if rowmeta is None:
rowmeta = pd.DataFrame(db.ra[key])
rowmeta.columns = [key]
else:
rowmeta[key] = db.ra[key]
if do_deint:
rowmeta = deintify(rowmeta.astype(np.int64))
return rowmeta, colmeta
def we_can_pickle_it(thing, thingname: str):
"""
Parameters
----------
thing :
thingname : str :
thingname : str :
thingname : str :
thingname : str :
thingname: str :
Returns
-------
"""
import pickle
with open(thingname, 'wb') as f:
pickle.dump(thing, f, pickle.HIGHEST_PROTOCOL)
def we_can_unpickle_it(thingname: str):
"""
Parameters
----------
thingname : str :
thingname : str :
thingname : str :
thingname : str :
thingname: str :
Returns
-------
"""
import pickle
with open(thingname, 'rb') as f:
thing = pickle.load(f)
return thing
def get_alpha_concave_hull_polygon(xcoords, ycoords, alpha=0.1, buffer=1):
"""Much credit to https://thehumangeo.wordpress.com/2014/05/12/drawing-boundaries-in-python/
Parameters
----------
xcoords :
ycoords :
alpha :
(Default value = 0.1)
buffer :
(Default value = 1)
Returns
-------
"""
from shapely.ops import cascaded_union, polygonize
import shapely.geometry as geometry
from scipy.spatial import Delaunay
import numpy as np
import math
def alpha_shape(points, alpha):
"""Compute the alpha shape (concave hull) of a set
of points.
Parameters
----------
points :
Iterable container of points.
alpha :
alpha value to influence the
gooeyness of the border. Smaller numbers
don't fall inward as much as larger numbers.
Too large, and you lose everything!
Returns
-------
"""
if len(points) < 4:
# When you have a triangle, there is no sense
# in computing an alpha shape.
return geometry.MultiPoint(list(points)).convex_hull
def add_edge(edges, edge_points, coords, i, j):
"""Add a line between the i-th and j-th points,
if not in the list already
Parameters
----------
edges :
edge_points :
coords :
i :
j :
Returns
-------
"""
if (i, j) in edges or (j, i) in edges:
# already added
return
edges.add((i, j))
edge_points.append(coords[[i, j]])
coords = np.array([point.coords[0] for point in points])
tri = Delaunay(coords)
edges = set()
edge_points = []
# loop over triangles:
# ia, ib, ic = indices of corner points of the
# triangle
for ia, ib, ic in tri.vertices:
pa = coords[ia]
pb = coords[ib]
pc = coords[ic]
# Lengths of sides of triangle
a = math.sqrt((pa[0] - pb[0])**2 + (pa[1] - pb[1])**2)
b = math.sqrt((pb[0] - pc[0])**2 + (pb[1] - pc[1])**2)
c = math.sqrt((pc[0] - pa[0])**2 + (pc[1] - pa[1])**2)
# Semiperimeter of triangle
s = (a + b + c) / 2.0
# Area of triangle by Heron's formula
area = math.sqrt(s * (s - a) * (s - b) * (s - c))
circum_r = a * b * c / (4.0 * area)
# Here's the radius filter.
#print circum_r
if circum_r < 1.0 / alpha:
add_edge(edges, edge_points, coords, ia, ib)
add_edge(edges, edge_points, coords, ib, ic)
add_edge(edges, edge_points, coords, ic, ia)
m = geometry.MultiLineString(edge_points)
triangles = list(polygonize(m))
return cascaded_union(triangles), edge_points
points = []
for x, y in zip(xcoords, ycoords):
points.append(geometry.shape({'type': 'Point', 'coordinates': [x, y]}))
concave_hull, edge_points = alpha_shape(points, alpha=alpha)
return concave_hull.buffer(buffer)
def get_outlier_removal_mask(xcoords, ycoords, nth_neighbor=10, quantile=.9):
"""
Parameters
----------
xcoords :
ycoords :
nth_neighbor :
(Default value = 10)
quantile :
(Default value = .9)
Returns
-------
"""
from scipy.spatial.distance import pdist, squareform
D = squareform(pdist(np.vstack((xcoords, ycoords)).T))
distances = D[np.argsort(D, axis=0)[nth_neighbor - 1, :], 0]
return distances <= np.quantile(distances, quantile)
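# Illustrative sketch (added): one far-away point should have a large
# 10th-neighbor distance, so the quantile filter is intended to drop it.
def _demo_outlier_removal_mask():
    rng = np.random.RandomState(0)
    xcoords = np.append(rng.normal(size=50), 100.0)  # last point is an outlier
    ycoords = np.append(rng.normal(size=50), 100.0)
    mask = get_outlier_removal_mask(xcoords, ycoords, nth_neighbor=10, quantile=.9)
    return xcoords[mask], ycoords[mask]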
def cohensd(g1, g2):
"""
Returns Cohen's D for the effect size of group 1 values (g1) over group 2 values (g2).
Parameters
----------
g1 : group 1 values (list or numpy vector)
g2 : group 2 values (list or numpy vector)
Returns
-------
(mean(g1) - mean(g2) )/s, where s is the pooled standard deviation of the two groups with Bessel's correction
"""
n1 = len(g1)
n2 = len(g2)
s1 = np.std(g1, ddof=1)
s2 = np.std(g2, ddof=1)
s = np.sqrt(((n1 - 1) * s1 * s1 + (n2 - 1) * s2 * s2) / (n1 + n2 - 2))
return (np.mean(g1) - np.mean(g2)) / s
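# Worked example (added): two unit-variance normal samples whose means differ
# by 1.0 should give a Cohen's d close to 1.
def _demo_cohensd():
    rng = np.random.RandomState(0)
    g1 = rng.normal(loc=1.0, size=10000)
    g2 = rng.normal(loc=0.0, size=10000)
    return cohensd(g1, g2)  # approximately 1.0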
def phi_coefficient(contingency_table):
"""
Returns the phi-coefficient for a contingency table.
    Parameters
    ----------
contingency_table : contingency table, identical in format to scipy.stats.fisher_exact
Returns
-------
phi coefficient
"""
table1 = contingency_table[0]
table2 = contingency_table[1]
table = np.vstack([table1, table2])
phitop = (table1[0] * table2[1] - table1[1] * table2[0])
phibottom = np.sqrt((table2[1]+table2[0])*\
(table1[1]+table1[0])*\
(table1[0]+table2[0])*\
(table2[1]+table1[1]))
phi = phitop / phibottom
return phi
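# Worked example (added): a perfectly concordant 2x2 table gives phi = 1 and a
# uniform table gives phi = 0.
def _demo_phi_coefficient():
    assert phi_coefficient([[10, 0], [0, 10]]) == 1.0
    assert phi_coefficient([[5, 5], [5, 5]]) == 0.0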
def get_igraph_from_adjacency(adjacency, directed=None):
"""This is taken from scanpy._utils.__init__.py as of 12 August 2021
Get igraph graph from adjacency matrix."""
import igraph as ig
sources, targets = adjacency.nonzero()
weights = adjacency[sources, targets]
if isinstance(weights, np.matrix):
weights = weights.A1
g = ig.Graph(directed=directed)
g.add_vertices(adjacency.shape[0]) # this adds adjacency.shape[0] vertices
g.add_edges(list(zip(sources, targets)))
try:
g.es['weight'] = weights
except KeyError:
pass
if g.vcount() != adjacency.shape[0]:
        import logging
        logging.warning(f'The constructed graph has only {g.vcount()} nodes. '
                        'Your adjacency matrix contained redundant nodes.')
return g
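# Illustrative sketch (added): build an igraph graph from a 2-node sparse
# adjacency matrix; requires python-igraph to be installed.
def _demo_igraph_from_adjacency():
    from scipy import sparse
    adjacency = sparse.csr_matrix(np.array([[0., 1.], [1., 0.]]))
    g = get_igraph_from_adjacency(adjacency, directed=False)
    return g.vcount(), list(g.es['weight'])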
def convert_10x_h5(path_10x_h5,
output_file,
labelkey=None,
label='',
genes_as_ca=[],
gene_whitelist=None,
output_type='loom'):
import cellranger.matrix as cr_matrix
import loompy
output_type = output_file.split('.')[-1]
if output_type not in ['loom', 'pkl']:
        raise Exception(
            "output_file must have suffix loom or pkl, denoting an output type of loom or pickle, respectively"
        )
filtered_feature_bc_matrix = cr_matrix.CountMatrix.load_h5_file(
path_10x_h5)
id2feature = {
val: key
for key, val in filtered_feature_bc_matrix.feature_ids_map.items()
}
features = [
id2feature[x].decode("utf-8")
for x in range(filtered_feature_bc_matrix.features_dim)
]
features_common_names = filtered_feature_bc_matrix.feature_ref.get_feature_names(
)
barcodes = filtered_feature_bc_matrix.bcs.astype(str)
ca = {'cellname': barcodes}
if labelkey is not None:
ca[labelkey] = [label] * len(barcodes)
m = filtered_feature_bc_matrix.m
if gene_whitelist is not None:
if len(gene_whitelist) > 0:
mask = np.isin(features, gene_whitelist)
m = m[mask, :]
features = list(np.array(features)[mask])
features_common_names = list(np.array(features_common_names)[mask])
if type(genes_as_ca) == str:
genes_as_ca = [genes_as_ca]
else:
genes_as_ca = list(genes_as_ca)
if len(genes_as_ca) > 0:
mask = np.isin(features, genes_as_ca)
if len(genes_as_ca) != mask.sum():
raise Exception(
"Improper mapping of row attributes; perhaps gene of interest not in loom.ra[\'gene\']?"
)
for gene in genes_as_ca:
submask = np.array(features) == gene
if np.sum(submask) > 1:
raise Exception("Two or more features with this name")
elif np.sum(submask) == 0:
raise Exception("No features with this name")
ca[gene] = list(m[submask, :].toarray()[0])
m = m[~mask, :]
features = list(np.array(features)[~mask])
features_common_names = list(np.array(features_common_names)[~mask])
ra = {'gene': features, 'gene_common_name': features_common_names}
if output_type == 'loom':
loompy.create(output_file, m, ra, ca)
if output_type == 'pkl':
if gene_whitelist is None:
raise Exception(
"pkl output intended only for saving a small subsetted geneset of interest. Please select a whitelist before saving as dataframe pkl."
)
mask = np.isin(features, gene_whitelist)
features = np.array(features)[mask]
features_common_names = np.array(features_common_names)[mask]
df = pd.DataFrame(m[mask, :].toarray())
df.index = features
if labelkey is not None:
df.columns = [labelkey + '_' + x for x in barcodes]
else:
df.columns = barcodes
df.to_pickle(output_file)
def create_split_exon_gtf(input_gtf, output_gtf, gene):
gtf = pd.read_table(input_gtf, header=None, comment='#')
gtf.columns = [
'seqname', 'source', 'feature', 'start', 'end', 'score', 'strand',
'frame', 'attribute'
]
gtf = gtf[gtf['feature'] == 'exon']
if type(gene) == str:
mask = gtf['attribute'].apply(
lambda x: 'gene_name "{}"'.format(gene) in x)
    elif isinstance(gene, (list, tuple, np.ndarray)):
mask = np.array([False] * len(gtf))
for g in gene:
mask = mask | gtf['attribute'].apply(
lambda x: 'gene_name "{}"'.format(g) in x)
    gtf_unchanged = gtf[~mask]
    gtf_changed = gtf[mask].copy()
def append_exon_number_to_id_and_name(attribute):
exon_number = attribute.split('exon_number')[1].split(';')[0].split(
'\"')[-2]
old_gene_id_str = 'gene_id' + attribute.split('gene_id')[1].split(
';')[0]
new_gene_id_str = '\"'.join(
old_gene_id_str.split('\"')[0:-1]) + '-exon' + exon_number + '\"'
attribute = attribute.replace(old_gene_id_str, new_gene_id_str)
old_gene_name_str = 'gene_name' + attribute.split(
'gene_name')[1].split(';')[0]
new_gene_name_str = '\"'.join(
old_gene_name_str.split('\"')[0:-1]) + '-exon' + exon_number + '\"'
attribute = attribute.replace(old_gene_name_str, new_gene_name_str)
old_transcript_id_str = 'transcript_id' + attribute.split(
'transcript_id')[1].split(';')[0]
new_transcript_id_str = '\"'.join(
old_transcript_id_str.split('\"')
[0:-1]) + '-exon' + exon_number + '\"'
attribute = attribute.replace(old_transcript_id_str,
new_transcript_id_str)
old_transcript_name_str = 'transcript_name' + attribute.split(
'transcript_name')[1].split(';')[0]
new_transcript_name_str = '\"'.join(
old_transcript_name_str.split('\"')
[0:-1]) + '-exon' + exon_number + '\"'
attribute = attribute.replace(old_transcript_name_str,
new_transcript_name_str)
if 'ccds_id' in attribute:
old_ccds_id_str = 'ccds_id' + attribute.split('ccds_id')[1].split(
';')[0]
new_ccds_id_str = '\"'.join(old_ccds_id_str.split('\"')
[0:-1]) + '-exon' + exon_number + '\"'
attribute = attribute.replace(old_ccds_id_str, new_ccds_id_str)
return attribute
gtf_changed['attribute'] = gtf_changed['attribute'].apply(
append_exon_number_to_id_and_name)
gtf = pd.concat([gtf_changed, gtf_unchanged])
gtf.to_csv(output_gtf, sep='\t', index=False, header=None)
def get_umap_from_matrix(X,
random_state=17,
verbose=True,
min_dist=0.001,
n_neighbors=20,
metric='correlation'):
import umap
reducer = umap.UMAP(random_state=random_state,
verbose=verbose,
min_dist=min_dist,
n_neighbors=n_neighbors,
metric=metric)
return reducer.fit_transform(X)
def convert_h5ad(h5ad,
output_loom,
convert_obsm=True,
convert_varm=True,
convert_uns=True,
convert_layers=True):
import scanpy
import loompy
h5ad = scanpy.read_h5ad(h5ad)
ra = {'gene': np.array(h5ad.var.index)}
for col in h5ad.var.columns:
if col == 'gene':
raise Exception(
"var column of h5ad is \"gene\". This conflicts with panopticon loom format. You must rename before converting."
)
else:
ra[col] = np.array(h5ad.var[col].values)
ca = {'cellname': np.array(h5ad.obs.index)}
for col in h5ad.obs.columns:
if col == 'cellname':
raise Exception(
"obs column of h5ad is \"cellname\". This conflicts with panopticon loom format. You must rename before converting."
)
else:
ca[col] = np.array(h5ad.obs[col].values)
if convert_obsm:
for obsm_key in h5ad.obsm.keys():
for i in range(h5ad.obsm[obsm_key].shape[1]):
ca_key = "{}_{}".format(
obsm_key,
i + 1) # one added so that these are 1-indexed by default
if ca_key in ca.keys():
                    raise Exception(
                        "key \"{}\" already present as column attribute key. Please rename to avoid.".format(ca_key)
                    )
else:
ca[ca_key] = h5ad.obsm[obsm_key][:, i]
if convert_varm:
for varm_key in h5ad.varm.keys():
for i in range(h5ad.varm[varm_key].shape[1]):
ra_key = "{}_{}".format(
varm_key,
i + 1) # one added so that these are 1-indexed by default
if ra_key in ra.keys():
                    raise Exception(
                        "key \"{}\" already present as row attribute key. Please rename to avoid.".format(ra_key)
                    )
else:
ra[ra_key] = h5ad.varm[varm_key][:, i]
loompy.create(output_loom, h5ad.X.T, ra, ca)
if convert_uns:
loom = loompy.connect(output_loom)
for uns_key in h5ad.uns.keys():
loom.attrs[uns_key] = h5ad.uns[uns_key]
loom.close()
if convert_layers:
loom = loompy.connect(output_loom)
for layer_key in h5ad.layers.keys():
            loom.layers[layer_key] = h5ad.layers[layer_key].T
loom.close()
def get_UMI_curve_from_10x_h5(path_10x_h5, save_to_file=None):
import cellranger.matrix as cr_matrix
import matplotlib.pyplot as plt
bc_matrix = cr_matrix.CountMatrix.load_h5_file(path_10x_h5)
fig, ax = plt.subplots(figsize=(5, 5))
ax.plot(np.sort(bc_matrix.get_counts_per_bc())[::-1])
ax.set_title('UMI counts per barcode, sorted')
ax.set_ylabel('UMI counts')
ax.set_xlabel('cell rank, UMI counts (most to fewest)')
ax.set_xscale('log')
ax.set_yscale('log')
if save_to_file is None:
plt.show()
else:
plt.savefig(save_to_file)
plt.cla()
def get_dsb_normalization(cell_antibody_counts,
empty_droplet_antibody_counts,
use_isotype_control=True,
denoise_counts=True,
isotype_control_name_vec=None,
define_pseudocount=False,
pseudocount_use=10,
quantile_clipping=False,
quantile_clip=[0.001, 0.9995],
return_stats=False):
import rpy2.robjects as robjects
import rpy2.robjects.numpy2ri
if isotype_control_name_vec is None:
isotype_control_name_vec = robjects.r("NULL")
if (pseudocount_use != 10) and (not define_pseudocount):
raise Exception(
"\"define_pseudocount\" must be set to True to use pseudocount_use"
)
rpy2.robjects.numpy2ri.activate()
robjects.r('''
library(mclust)
library(dsb)
dsb <- function(cells,
empty,
use.isotype.control=TRUE,
denoise.counts=TRUE,
isotype.control.name.vec = NULL,
define.pseudocount = FALSE,
pseudocount.use = 10,
quantile.clipping = FALSE,
quantile.clip = c(0.001, 0.9995),
return.stats = FALSE){
DSBNormalizeProtein(cells, empty, use.isotype.control=use.isotype.control,
isotype.control.name.vec = isotype.control.name.vec,
denoise.counts=denoise.counts,
define.pseudocount = define.pseudocount,
pseudocount.use = pseudocount.use,
quantile.clipping = quantile.clipping,
quantile.clip = quantile.clip,
return.stats = return.stats)
}
''')
dsb = robjects.r['dsb']
return dsb(cell_antibody_counts,
empty_droplet_antibody_counts,
use_isotype_control=use_isotype_control,
denoise_counts=denoise_counts,
isotype_control_name_vec=isotype_control_name_vec,
define_pseudocount=define_pseudocount,
pseudocount_use=pseudocount_use,
quantile_clipping=quantile_clipping,
quantile_clip=quantile_clip,
return_stats=return_stats)
def get_cellphonedb_compatible_counts_and_meta(loom,
layername,
celltype_ca,
gene_ra='gene',
cellname_ca='cellname',
return_df=False,
output_prefix=None,
mouse_to_human=False):
if output_prefix is None and not return_df:
raise Exception(
"either output_prefix must be specified, or return_df must be True"
)
counts = pd.DataFrame(loom[layername][:, :])
counts.columns = loom.ca[cellname_ca]
#counts.insert(0, 'Gene', np.array([x.upper() for x in loom.ra[gene_ra]]))
genes = loom.ra[gene_ra]
if mouse_to_human:
from pybiomart import Server
server = Server(host="http://www.ensembl.org")
mouse_dataset = (server.marts['ENSEMBL_MART_ENSEMBL'].
datasets['mmusculus_gene_ensembl'])
mouse_data = mouse_dataset.query(
attributes=['ensembl_gene_id', 'external_gene_name'])
mouse_data['Gene upper'] = mouse_data['Gene name'].apply(
lambda x: str(x).upper())
human_dataset = (server.marts['ENSEMBL_MART_ENSEMBL'].
datasets['hsapiens_gene_ensembl'])
human_data = human_dataset.query(
attributes=['ensembl_gene_id', 'external_gene_name'])
conversion_dict = pd.merge(
mouse_data, human_data, left_on='Gene upper',
right_on='Gene name').set_index(
'Gene stable ID_x')['Gene stable ID_y'].to_dict()
convertible_mask = np.array(
[x in conversion_dict.keys() for x in genes])
genes = [
conversion_dict[x] if x in conversion_dict.keys() else np.nan
for x in genes
]
counts.insert(0, 'Gene', genes)
if mouse_to_human:
counts = counts.iloc[convertible_mask, :]
counts = counts.groupby('Gene').first().reset_index()
meta = pd.DataFrame(loom.ca[cellname_ca])
meta.columns = ['Cell']
meta['cell_type'] = loom.ca[celltype_ca]
if output_prefix is not None:
counts.to_csv(output_prefix + '_counts.txt', sep='\t', index=False)
meta.to_csv(output_prefix + '_meta.txt', sep='\t', index=False)
command = 'cellphonedb method statistical_analysis {0}_meta.txt {0}_counts.txt'.format(
output_prefix)
print("Run cellphonedb on command line with \"{}\"".format(command))
elif return_df:
return meta, counts
def create_gsea_txt_and_cls(loom,
layername,
output_prefix,
phenotypes,
cellmask=None,
gene_ra='gene',
cellname_ca='cellname'):
import os
if cellmask is None:
cellmask = np.array([True] * loom.shape[1])
if type(phenotypes) == str:
phenotypes = loom.ca[phenotypes]
if len(phenotypes) != cellmask.sum():
raise Exception(
"length of phenotypes vector must be equal to number of samples (cells)"
)
txt = pd.DataFrame(loom.ra[gene_ra])
txt.columns = ['NAME']
txt['DESCRIPTION'] = 'na'
#txt = pd.concat([txt,pd.DataFrame(loom[layername][:,cellmask])],axis=1)
#txt.columns = ['NAME','DESCRIPTION'] + list(loom.ca[cellname_ca][cellmask])
#txt.to_csv(output_prefix+'.txt',index=False,sep='\t')
total = cellmask.sum()
nphenotypes = len(np.unique(phenotypes))
outcls = output_prefix + '.cls'
if os.path.exists(outcls):
os.system("rm {}".format(outcls))
#raise Exception("cls file already present--cannot overwrite")
line1 = "{} {} 1".format(total, nphenotypes)
line2 = '# ' + ' '.join(np.unique(phenotypes))
phenotype2index = {
phenotype: i
for i, phenotype in enumerate(np.unique(phenotypes))
}
#print(phenotype2index)
#print([phenotype2index[x] for x in phenotypes])
line3 = ' '.join([str(phenotype2index[x]) for x in phenotypes])
for line in [line1, line2, line3]:
os.system('echo \"{}\">>{}'.format(line, outcls))
def get_cross_column_attribute_heatmap(loom,
ca1,
ca2,
normalization_axis=None):
#if type(normalization_axis) == list:
# outdfs = []
# for axis in normalization_axis:
# outdfs.append(get_cross_column_attribute_heatmap(loom, ca1, ca2, normalization_axis=axis))
# return outdfs
df = pd.DataFrame(loom.ca[ca1], copy=True)
df.columns = [ca1]
df[ca2] = loom.ca[ca2]
df = pd.DataFrame(df.groupby(ca1, )[ca2].value_counts())
df.columns = ['counts']
dfs = []
for i, df_group in df.reset_index().groupby(ca1):
dfs.append(
df_group.rename(columns={
'counts': 'counts_' + i
}).set_index(ca2)['counts_' + i])
outdf = pd.concat(dfs, axis=1)
if normalization_axis is None:
return outdf
elif normalization_axis == 0:
return np.divide(outdf, outdf.sum(axis=0).values)
elif normalization_axis == 1:
return np.divide(outdf.T, outdf.sum(axis=1).values).T
else:
raise Exception("normalization axis must be one of \"None\", 0, or 1")
def get_complement_contigency_tables(df):
if type(df) != pd.core.frame.DataFrame:
raise Exception("pandas dataframe expected input")
complement_contigency_table_dict = {}
for col in df.columns:
complement_contigency_table_dict[col] = {}
for index in df.index.values:
a = df.loc[index][col].sum()
b = df.loc[index][[x for x in df.columns if x != col]].sum()
c = df.loc[[x for x in df.index if x != index]][col].sum()
d = np.sum(df.loc[[x for x in df.index if x != index
]][[x for x in df.columns if x != col]].sum())
complement_contigency_table_dict[col][index] = [[a, b], [c, d]]
return complement_contigency_table_dict
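# Worked example (added): for each cell of a crosstab, the complement table is
# [[a, b], [c, d]] with a the cell's own count and d the total outside its row
# and column.
def _demo_complement_contigency_tables():
    df = pd.DataFrame([[1, 2], [3, 4]], index=['r0', 'r1'], columns=['c0', 'c1'])
    tables = get_complement_contigency_tables(df)
    assert tables['c0']['r0'] == [[1, 2], [3, 4]]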
def get_cluster_differential_expression_heatmap_df(loom,
layer,
clusteringlevel,
diffex={},
gene_name='gene',
cell_name='cellname'):
"""
Returns
-------
"""
from panopticon.analysis import get_cluster_differential_expression
import seaborn as sns
import pandas as pd
clusteredmask = []
for cluster in np.unique(loom.ca[clusteringlevel]):
mask = loom.ca[clusteringlevel] == cluster
if mask.sum() > 2:
clusteredmask.append(np.where(mask)[0])
    clusteredmask = np.hstack(clusteredmask)
import unittest
import numpy
import test_utils
class TestBasicAddition(unittest.TestCase):
# Test basic addition of all combinations of all types, not checking for any edge cases specifically.
ZERO = numpy.float32(0)
ONE = numpy.float32(1)
MIN_SUBNORM = numpy.float32(1e-45)
MAX_SUBNORM = numpy.float32(1.1754942e-38)
MIN_NORM = numpy.float32(1.1754944e-38)
MAX_NORM = numpy.float32(3.4028235e38)
INF = numpy.float32(numpy.inf)
NAN = numpy.float32(numpy.nan)
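    # Added note: these are the float32 boundary values. The smallest subnormal
    # is 2**-149 (~1.4e-45), the largest subnormal is (1 - 2**-23) * 2**-126,
    # the smallest normal is 2**-126 (~1.1754944e-38), and the largest finite
    # value is (2 - 2**-23) * 2**127 (~3.4028235e38).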
# Initialise the tester object used to run the assembled code.
@classmethod
def setUpClass(cls):
cls.tester = test_utils.SubroutineTester("test_addition.s")
cls.tester.initialise()
# Run a test to compare the expected sum of two floats to the actual sum.
def run_test(self, float1: numpy.float32, float2: numpy.float32):
expected = float1 + float2
if numpy.isnan(expected):
self.assertTrue(numpy.isnan(TestBasicAddition.tester.run_test(float1, float2)))
else:
self.assertEqual(float1 + float2,
TestBasicAddition.tester.run_test(float1, float2))
def test_zero(self):
# Test that ±0 + x = x for all types of x.
self.run_test(self.ZERO, self.ZERO)
self.run_test(self.ZERO, -self.ZERO)
self.run_test(-self.ZERO, self.ZERO)
self.run_test(-self.ZERO, -self.ZERO)
self.run_test(self.ZERO, self.ONE)
self.run_test(self.ZERO, -self.ONE)
self.run_test(-self.ZERO, self.ONE)
self.run_test(-self.ZERO, -self.ONE)
self.run_test(self.ZERO, self.MIN_SUBNORM)
self.run_test(self.ZERO, -self.MIN_SUBNORM)
self.run_test(-self.ZERO, self.MIN_SUBNORM)
self.run_test(-self.ZERO, -self.MIN_SUBNORM)
self.run_test(self.ZERO, numpy.float32(9.060464e-39))
self.run_test(self.ZERO, -numpy.float32(9.060464e-39))
self.run_test(-self.ZERO, numpy.float32(9.060464e-39))
self.run_test(-self.ZERO, -numpy.float32(9.060464e-39))
self.run_test(self.ZERO, self.MAX_SUBNORM)
self.run_test(self.ZERO, -self.MAX_SUBNORM)
self.run_test(-self.ZERO, self.MAX_SUBNORM)
self.run_test(-self.ZERO, -self.MAX_SUBNORM)
self.run_test(self.ZERO, self.MIN_NORM)
self.run_test(self.ZERO, -self.MIN_NORM)
self.run_test(-self.ZERO, self.MIN_NORM)
self.run_test(-self.ZERO, -self.MIN_NORM)
self.run_test(self.ZERO, numpy.float32(395.6166))
self.run_test(self.ZERO, -numpy.float32(395.6166))
self.run_test(-self.ZERO, numpy.float32(395.6166))
self.run_test(-self.ZERO, -numpy.float32(395.6166))
self.run_test(self.ZERO, self.MAX_NORM)
self.run_test(self.ZERO, -self.MAX_NORM)
self.run_test(-self.ZERO, self.MAX_NORM)
self.run_test(-self.ZERO, -self.MAX_NORM)
self.run_test(self.ZERO, self.INF)
self.run_test(self.ZERO, -self.INF)
self.run_test(-self.ZERO, self.INF)
self.run_test(-self.ZERO, -self.INF)
self.run_test(self.ZERO, self.NAN)
self.run_test(-self.ZERO, self.NAN)
def test_one(self):
# Test ±1 + x for all types of x.
self.run_test(self.ONE, self.ZERO)
self.run_test(self.ONE, -self.ZERO)
self.run_test(-self.ONE, self.ZERO)
self.run_test(-self.ONE, -self.ZERO)
self.run_test(self.ONE, self.ONE)
self.run_test(self.ONE, -self.ONE)
self.run_test(-self.ONE, self.ONE)
self.run_test(-self.ONE, -self.ONE)
self.run_test(self.ONE, self.MIN_SUBNORM)
self.run_test(self.ONE, -self.MIN_SUBNORM)
self.run_test(-self.ONE, self.MIN_SUBNORM)
self.run_test(-self.ONE, -self.MIN_SUBNORM)
self.run_test(self.ONE, numpy.float32(1.902965e-39))
self.run_test(self.ONE, -numpy.float32(1.902965e-39))
self.run_test(-self.ONE, numpy.float32(1.902965e-39))
self.run_test(-self.ONE, -numpy.float32(1.902965e-39))
self.run_test(self.ONE, self.MAX_SUBNORM)
self.run_test(self.ONE, -self.MAX_SUBNORM)
self.run_test(-self.ONE, self.MAX_SUBNORM)
self.run_test(-self.ONE, -self.MAX_SUBNORM)
self.run_test(self.ONE, self.MIN_NORM)
self.run_test(self.ONE, -self.MIN_NORM)
self.run_test(-self.ONE, self.MIN_NORM)
self.run_test(-self.ONE, -self.MIN_NORM)
self.run_test(self.ONE, numpy.float32(7918.158))
self.run_test(self.ONE, -numpy.float32(7918.158))
self.run_test(-self.ONE, numpy.float32(7918.158))
self.run_test(-self.ONE, -numpy.float32(7918.158))
self.run_test(self.ONE, self.MAX_NORM)
self.run_test(self.ONE, -self.MAX_NORM)
self.run_test(-self.ONE, self.MAX_NORM)
self.run_test(-self.ONE, -self.MAX_NORM)
self.run_test(self.ONE, self.INF)
self.run_test(self.ONE, -self.INF)
self.run_test(-self.ONE, self.INF)
self.run_test(-self.ONE, -self.INF)
self.run_test(self.ONE, self.NAN)
self.run_test(-self.ONE, self.NAN)
def test_min_subnorm(self):
# Test ±MIN_SUBNORM + x for all types of x.
self.run_test(self.MIN_SUBNORM, self.ZERO)
self.run_test(self.MIN_SUBNORM, -self.ZERO)
self.run_test(-self.MIN_SUBNORM, self.ZERO)
self.run_test(-self.MIN_SUBNORM, -self.ZERO)
self.run_test(self.MIN_SUBNORM, self.ONE)
self.run_test(self.MIN_SUBNORM, -self.ONE)
self.run_test(-self.MIN_SUBNORM, self.ONE)
self.run_test(-self.MIN_SUBNORM, -self.ONE)
self.run_test(self.MIN_SUBNORM, self.MIN_SUBNORM)
self.run_test(self.MIN_SUBNORM, -self.MIN_SUBNORM)
self.run_test(-self.MIN_SUBNORM, self.MIN_SUBNORM)
self.run_test(-self.MIN_SUBNORM, -self.MIN_SUBNORM)
self.run_test(self.MIN_SUBNORM, numpy.float32(6.927885e-39))
self.run_test(self.MIN_SUBNORM, -numpy.float32(6.927885e-39))
self.run_test(-self.MIN_SUBNORM, numpy.float32(6.927885e-39))
self.run_test(-self.MIN_SUBNORM, -numpy.float32(6.927885e-39))
self.run_test(self.MIN_SUBNORM, self.MAX_SUBNORM)
self.run_test(self.MIN_SUBNORM, -self.MAX_SUBNORM)
self.run_test(-self.MIN_SUBNORM, self.MAX_SUBNORM)
self.run_test(-self.MIN_SUBNORM, -self.MAX_SUBNORM)
self.run_test(self.MIN_SUBNORM, self.MIN_NORM)
self.run_test(self.MIN_SUBNORM, -self.MIN_NORM)
self.run_test(-self.MIN_SUBNORM, self.MIN_NORM)
self.run_test(-self.MIN_SUBNORM, -self.MIN_NORM)
self.run_test(self.MIN_SUBNORM, numpy.float32(466603.3))
self.run_test(self.MIN_SUBNORM, -numpy.float32(466603.3))
self.run_test(-self.MIN_SUBNORM, numpy.float32(466603.3))
self.run_test(-self.MIN_SUBNORM, -numpy.float32(466603.3))
self.run_test(self.MIN_SUBNORM, self.MAX_NORM)
self.run_test(self.MIN_SUBNORM, -self.MAX_NORM)
self.run_test(-self.MIN_SUBNORM, self.MAX_NORM)
self.run_test(-self.MIN_SUBNORM, -self.MAX_NORM)
self.run_test(self.MIN_SUBNORM, self.INF)
self.run_test(self.MIN_SUBNORM, -self.INF)
self.run_test(-self.MIN_SUBNORM, self.INF)
self.run_test(-self.MIN_SUBNORM, -self.INF)
self.run_test(self.MIN_SUBNORM, self.NAN)
self.run_test(-self.MIN_SUBNORM, self.NAN)
def test_subnorm(self):
# Test ±x + y for subnormal x and all types of y.
self.run_test(numpy.float32(7.518523e-39), self.ZERO)
self.run_test(numpy.float32(7.518523e-39), -self.ZERO)
self.run_test(-numpy.float32(7.518523e-39), self.ZERO)
self.run_test(-numpy.float32(7.518523e-39), -self.ZERO)
self.run_test(numpy.float32(2.028916e-39), self.ONE)
self.run_test(numpy.float32(2.028916e-39), -self.ONE)
self.run_test(-numpy.float32(2.028916e-39), self.ONE)
self.run_test(-numpy.float32(2.028916e-39), -self.ONE)
self.run_test(numpy.float32(4.042427e-39), self.MIN_SUBNORM)
self.run_test(numpy.float32(4.042427e-39), -self.MIN_SUBNORM)
self.run_test(-numpy.float32(4.042427e-39), self.MIN_SUBNORM)
self.run_test(-numpy.float32(4.042427e-39), -self.MIN_SUBNORM)
self.run_test(numpy.float32(9.636327e-39), numpy.float32(1.0185049e-38))
self.run_test(numpy.float32(9.636327e-39), -numpy.float32(1.0185049e-38))
self.run_test(-numpy.float32(9.636327e-39), numpy.float32(1.0185049e-38))
        self.run_test(-numpy.float32(9.636327e-39), -numpy.float32(1.0185049e-38))
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.neighbors import NearestNeighbors
import numpy as np
from layers import (
CMul,
Flatten,
ConcatTable,
Identity,
Reshape,
SpatialAttention2d,
WeightedSum2d,
ChannelAttention,
SpatialAttention,
WPCA
)
# based on https://github.com/lyakaap/NetVLAD-pytorch/blob/master/netvlad.py
class NetVLAD(nn.Module):
"""NetVLAD layer implementation"""
def __init__(self, num_clusters=64, dim=128,
normalize_input=True, vladv2=False):
"""
Args:
num_clusters : int
The number of clusters
dim : int
Dimension of descriptors
alpha : float
Parameter of initialization. Larger value is harder assignment.
normalize_input : bool
If true, descriptor-wise L2 normalization is applied to input.
vladv2 : bool
If true, use vladv2 otherwise use vladv1
"""
super(NetVLAD, self).__init__()
self.num_clusters = num_clusters
self.dim = dim
self.alpha = 0
self.vladv2 = vladv2
self.normalize_input = normalize_input
self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1), bias=vladv2)
self.centroids = nn.Parameter(torch.rand(num_clusters, dim))
def init_params(self, clsts, traindescs):
#TODO replace numpy ops with pytorch ops
if self.vladv2 == False:
print('using vladv1')
print(clsts,traindescs)
clstsAssign = clsts / np.linalg.norm(clsts, axis=1, keepdims=True)
print(clstsAssign)
dots = np.dot(clstsAssign, traindescs.T)
dots.sort(0)
dots = dots[::-1, :] # sort, descending
print(np.mean(dots[0,:] - dots[1,:]),type(np.mean(dots[0,:] - dots[1,:])))
self.alpha = (-np.log(0.01) / np.mean(dots[0,:] - dots[1,:])).item()
self.centroids = nn.Parameter(torch.from_numpy(clsts))
self.conv.weight = nn.Parameter(torch.from_numpy(self.alpha*clstsAssign).unsqueeze(2).unsqueeze(3))
self.conv.bias = None
else:
print('using vladv2')
knn = NearestNeighbors(n_jobs=-1) #TODO faiss?
knn.fit(traindescs)
del traindescs
            dsSq = np.square(knn.kneighbors(clsts, 2)[0])  # kneighbors returns (distances, indices)
del knn
self.alpha = (-np.log(0.01) / np.mean(dsSq[:,1] - dsSq[:,0])).item()
self.centroids = nn.Parameter(torch.from_numpy(clsts))
del clsts, dsSq
self.conv.weight = nn.Parameter(
(2.0 * self.alpha * self.centroids).unsqueeze(-1).unsqueeze(-1)
)
self.conv.bias = nn.Parameter(
- self.alpha * self.centroids.norm(dim=1)
)
def forward(self, x):
N, C = x.shape[:2]
if self.normalize_input:
x = F.normalize(x, p=2, dim=1) # across descriptor dim
# soft-assignment
soft_assign = self.conv(x).view(N, self.num_clusters, -1)
soft_assign = F.softmax(soft_assign, dim=1)
x_flatten = x.view(N, C, -1)
# calculate residuals to each clusters
vlad = torch.zeros([N, self.num_clusters, C], dtype=x.dtype, layout=x.layout, device=x.device)
for C in range(self.num_clusters): # slower than non-looped, but lower memory usage
residual = x_flatten.unsqueeze(0).permute(1, 0, 2, 3) - \
self.centroids[C:C+1, :].expand(x_flatten.size(-1), -1, -1).permute(1, 2, 0).unsqueeze(0)
residual *= soft_assign[:,C:C+1,:].unsqueeze(2)
vlad[:,C:C+1,:] = residual.sum(dim=-1)
vlad = F.normalize(vlad, p=2, dim=2) # intra-normalization
vlad = vlad.view(x.size(0), -1) # flatten
vlad = F.normalize(vlad, p=2, dim=1) # L2 normalize
return vlad
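# Illustrative sketch (added; shapes are arbitrary): a NetVLAD layer over a
# random feature map. In practice init_params is called with k-means centroids
# and sample descriptors before use.
def _demo_netvlad():
    layer = NetVLAD(num_clusters=8, dim=16)
    features = torch.rand(2, 16, 7, 7)  # (N, C, H, W) local descriptors
    vlad = layer(features)              # (N, num_clusters * C) global descriptor
    return vlad.shape                   # torch.Size([2, 128])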
class NetVLAD_res(nn.Module):
"""NetVLAD layer implementation"""
def __init__(self, num_clusters=64, dim=128,
normalize_input=True, vladv2=False):
super(NetVLAD_res, self).__init__()
self.num_clusters = num_clusters
self.dim = dim
self.alpha = 0
self.vladv2 = vladv2
self.normalize_input = normalize_input
self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1), bias=vladv2)
self.centroids = nn.Parameter(torch.rand(num_clusters, dim))
def init_params(self, clsts, traindescs):
#TODO replace numpy ops with pytorch ops
if self.vladv2 == False:
print('using vladv1')
print(clsts,traindescs)
clstsAssign = clsts / np.linalg.norm(clsts, axis=1, keepdims=True)
print(clstsAssign)
dots = np.dot(clstsAssign, traindescs.T)
dots.sort(0)
dots = dots[::-1, :] # sort, descending
print(np.mean(dots[0,:] - dots[1,:]),type(np.mean(dots[0,:] - dots[1,:])))
self.alpha = (-np.log(0.01) / np.mean(dots[0,:] - dots[1,:])).item()
self.centroids = nn.Parameter(torch.from_numpy(clsts))
self.conv.weight = nn.Parameter(torch.from_numpy(self.alpha*clstsAssign).unsqueeze(2).unsqueeze(3))
self.conv.bias = None
else:
print('using vladv2')
knn = NearestNeighbors(n_jobs=-1) #TODO faiss?
knn.fit(traindescs)
del traindescs
            dsSq = np.square(knn.kneighbors(clsts, 2)[0])  # kneighbors returns (distances, indices)
del knn
self.alpha = (-np.log(0.01) / np.mean(dsSq[:,1] - dsSq[:,0])).item()
self.centroids = nn.Parameter(torch.from_numpy(clsts))
del clsts, dsSq
self.conv.weight = nn.Parameter(
(2.0 * self.alpha * self.centroids).unsqueeze(-1).unsqueeze(-1)
)
self.conv.bias = nn.Parameter(
- self.alpha * self.centroids.norm(dim=1)
)
def forward(self, x):
N, C = x.shape[:2] # 24 256 31 31
if self.normalize_input:
x = F.normalize(x, p=2, dim=1) # across descriptor dim
# soft-assignment
soft_assign = self.conv(x).view(N, self.num_clusters, -1) # 24 64 961
soft_assign = F.softmax(soft_assign, dim=1)
x_flatten = x.view(N, C, -1) #24 256 961
# calculate residuals to each clusters
vlad = torch.zeros([N, self.num_clusters, C], dtype=x.dtype, device=x.device) #24 64 256
residuals = torch.zeros([N, self.num_clusters, C ,961], dtype=x.dtype, layout=x.layout, device=x.device) # 24 64 256 961
for C in range(self.num_clusters): # slower than non-looped, but lower memory usage
residual = x_flatten.unsqueeze(0).permute(1, 0, 2, 3) - \
self.centroids[C:C+1, :].expand(x_flatten.size(-1), -1, -1).permute(1, 2, 0).unsqueeze(0)
residual *= soft_assign[:,C:C+1,:].unsqueeze(2) # 24 1 256 961 * 24 1 1 961 = 24 1 256 961
#print(residual.size())
#print('res size')
residuals[:,C:C+1,:,:] = residual # [24, 1, 256, 961]
#print('return residuals {}'.format(residuals.size()))
return residuals
class AttenNetVLAD(nn.Module):
def __init__(self, num_clusters=64, dim=128,attention=None,mul=True,actv='relu',da_type=None,add_relu=True,
normalize_input=True, vladv2=False,ratio=4):
super(AttenNetVLAD, self).__init__()
self.num_clusters = num_clusters
self.dim = dim
self.alpha = 0
self.da_type = da_type
self.vladv2 = vladv2
self.normalize_input = normalize_input
self.mul = mul
print(mul)
print('***')
self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1), bias=vladv2)
self.centroids = nn.Parameter(torch.rand(num_clusters, dim))
self.attention=attention
if attention in ['casa']:
self.ca = ChannelAttention(dim,actv,ratio)
print('using channel attention')
if attention in ['delf_attention']:
self.atten = SpatialAttention2d(in_c=dim,act_fn=actv)
print('using delf attention')
#elif attention in ['casa']:
# self.ca = ChannelAttention(dim,actv,ratio)
# self.atten = ChannelSpatialAttention(dim)
else:
a_layer=[]
if add_relu:
a_layer.append(nn.ReLU(inplace=True))
a_layer.append(nn.Conv2d(dim,1,1,1))
print('using attention to get heatmap')
if actv == 'relu':
a_layer.append(nn.ReLU())
print('using relu')
elif actv =='prelu':
print('**using prelu')
a_layer.append(nn.PReLU())
elif actv =='sigmoid':
print('using sigmoid')
a_layer.append(nn.Sigmoid())
elif actv =='softplus':
print('using softplus')
a_layer.append(nn.Softplus(beta=1, threshold=20))
self.atten = nn.Sequential(*a_layer) #nn.Conv2d(dim,1,1,1,bias=False)#nn.Sequential(*a_layer)
def init_params(self, clsts, traindescs):
#TODO replace numpy ops with pytorch ops
if self.vladv2 == False:
print('using vladv1')
print(clsts,traindescs)
clstsAssign = clsts / np.linalg.norm(clsts, axis=1, keepdims=True)
print(clstsAssign)
            dots = np.dot(clstsAssign, traindescs.T)
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pickle
from combined_thresh import combined_thresh
from perspective_transform import perspective_transform
def line_fit(binary_warped, T):
"""
Find and fit lane lines
"""
# Assuming you have created a warped binary image called "binary_warped"
# Take a histogram of the bottom half of the image
    # axis=0 sums down each column
    img_roi_y = 700  # [1] y origin (top-left corner) of the ROI
    img_roi_x = 0
    img_roi_height = binary_warped.shape[0]  # [2] ROI height
    img_roi_width = binary_warped.shape[1]  # [3] ROI width
img_roi = binary_warped[img_roi_y:img_roi_height, img_roi_x:img_roi_width]
# cv2.imshow('img_roi', img_roi)
histogram = np.sum(img_roi[0 :, :], axis=0)
# histogram = np.sum(img_roi[int(np.floor(binary_warped.shape[0]*(1-T))):,:], axis=0)
# plt.show()
    # Create an output image to draw on and visualize the result
out_img = (np.dstack((binary_warped, binary_warped, binary_warped))*255).astype('uint8')
cv2.rectangle(out_img, (img_roi_x, img_roi_y), (img_roi_width, img_roi_height), (255, 0, 0), 5)
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
    midpoint = int(histogram.shape[0] / 2)
leftx_base = np.argmax(histogram[100:midpoint]) + 100
rightx_base = np.argmax(histogram[midpoint:-100]) + midpoint
    # PMH: if no lane line is detected on one side (no histogram peak), copy a search start point from the other lane line
if (leftx_base == 100):
leftx_base = np.argmax(histogram[midpoint:-100]) - midpoint
if (rightx_base == midpoint):
rightx_base = np.argmax(histogram[100:midpoint]) + midpoint
    # Choose the number of sliding windows
nwindows = 9
    # Set height of windows (128)
    window_height = int(binary_warped.shape[0] / nwindows)
    # Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
    # Current positions to be updated for each window
leftx_current = leftx_base
rightx_current = rightx_base
leftx_current_last = leftx_base
rightx_current_last = rightx_base
leftx_current_next = leftx_base
rightx_current_next = rightx_base
    # Set the width of the windows +/- margin
margin = 150
    # Set minimum number of pixels found to recenter window
minpix = 50
    # Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# plt.figure(2)
# plt.subplot(2, 1, 1)
# plt.plot(histogram)
    # Step through the windows one by one
for window in range(nwindows-2):
        # Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window+1)*window_height
win_y_high = binary_warped.shape[0] - window*window_height
leftx_current = leftx_current_next
rightx_current = rightx_current_next
        # Set the left/right boundaries of the sliding windows
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
        # Draw the windows on the visualization image
cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0, 255, 0), 2)
cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high), (0, 255, 0), 2)
# plt.subplot(2, 1, 2)
# plt.imshow(out_img, cmap='gray', vmin=0, vmax=1)
        # Identify the nonzero pixels in x and y within the window
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        # Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
        # If you found > minpix pixels, recenter next window on their mean position
if len(good_left_inds) > minpix:
            leftx_current_next = int(np.mean(nonzerox[good_left_inds]))
else:
if window > 2:
leftx_current_next = leftx_current + (leftx_current - leftx_current_last)
                # good_left_inds = int((win_y_low + win_y_high) / 2) * binary_warped.shape[0] + leftx_current
                # left_lane_inds.append(np.int64(good_left_inds))  # 20180516 pmh: add the box midpoint as a fit point
else:
leftx_current_next = leftx_base
if len(good_right_inds) > minpix:
            rightx_current_next = int(np.mean(nonzerox[good_right_inds]))
else:
if window > 2:
rightx_current_next = rightx_current + (rightx_current - rightx_current_last)
# right_lane_inds.append(good_right_inds)
else:
rightx_current_next = rightx_base
leftx_current_last = leftx_current
rightx_current_last = rightx_current
# plt.figure(2)
# plt.subplot(2, 1, 1)
# plt.plot(histogram)
# plt.subplot(2, 1, 2)
# plt.imshow(out_img, cmap='gray', vmin=0, vmax=1)
# cv2.imshow('out_img', out_img)
# plt.savefig('D:/CIDI/data/L/line_fit_histo/')
# plt.close()
# save_file = '%s%06d%s' % ('D:/data/PNG20180206dataAllRectJPG/result1/', num_i+100000, 'Lr.jpg')
# fig1 = plt.gcf()
# fig1.set_size_inches(18.5, 10.5)
# plt.savefig(save_file)
    # Concatenate the arrays of indices
left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
import numpy as np
import scipy.spatial as spatial
from source.base import file_utils
def get_aabb(points: np.ndarray):
aabb_min = points.min(axis=0)
aabb_max = points.max(axis=0)
return aabb_min, aabb_max
def load_xyz(file_path):
data = np.loadtxt(file_path).astype('float32')
nan_lines = np.isnan(data).any(axis=1)
num_nan_lines = np.sum(nan_lines)
if num_nan_lines > 0:
data = data[~nan_lines] # filter rows with nan values
print('Ignored {} points containing NaN coordinates in point cloud {}'.format(num_nan_lines, file_path))
return data
def write_ply(file_path: str, points: np.ndarray, normals=None, colors=None):
"""
Write point cloud file as .ply.
:param file_path:
:param points:
:param normals:
:param colors:
:return: None
"""
import trimesh
assert(file_path.endswith('.ply'))
file_utils.make_dir_for_file(file_path)
if points.shape == (3,):
points = np.expand_dims(points, axis=0)
if points.shape[0] == 3 and points.shape[1] != 3:
points = points.transpose([1, 0])
if colors is not None and colors.shape[0] == 3 and colors.shape[1] != 3:
colors = colors.transpose([1, 0])
if normals is not None and normals.shape[0] == 3 and normals.shape[1] != 3:
normals = normals.transpose([1, 0])
# convert 2d points to 3d
if points.shape[1] == 2:
vertices_2p5d = np.zeros((points.shape[0], 3))
vertices_2p5d[:, :2] = points
vertices_2p5d[:, 2] = 0.0
points = vertices_2p5d
mesh = trimesh.Trimesh(vertices=points, vertex_colors=colors, vertex_normals=normals)
mesh.export(file_path)
def write_xyz(file_path, points: np.ndarray, normals=None, colors=None):
"""
Write point cloud file.
:param file_path:
:param points:
:param normals:
:param colors:
:return: None
"""
file_utils.make_dir_for_file(file_path)
if points.shape == (3,):
points = np.expand_dims(points, axis=0)
if points.shape[0] == 3 and points.shape[1] != 3:
points = points.transpose([1, 0])
if colors is not None and colors.shape[0] == 3 and colors.shape[1] != 3:
colors = colors.transpose([1, 0])
if normals is not None and normals.shape[0] == 3 and normals.shape[1] != 3:
normals = normals.transpose([1, 0])
with open(file_path, 'w') as fp:
# convert 2d points to 3d
if points.shape[1] == 2:
vertices_2p5d = np.zeros((points.shape[0], 3))
vertices_2p5d[:, :2] = points
vertices_2p5d[:, 2] = 0.0
points = vertices_2p5d
# write points
# meshlab doesn't like colors, only using normals. try cloud compare instead.
for vi, v in enumerate(points):
line_vertex = str(v[0]) + " " + str(v[1]) + " " + str(v[2]) + " "
if normals is not None:
line_vertex += str(normals[vi][0]) + " " + str(normals[vi][1]) + " " + str(normals[vi][2]) + " "
if colors is not None:
line_vertex += str(colors[vi][0]) + " " + str(colors[vi][1]) + " " + str(colors[vi][2]) + " "
fp.write(line_vertex + "\n")
def load_pcd(file_in):
# PCD: http://pointclouds.org/documentation/tutorials/pcd_file_format.php
# PCD RGB: http://docs.pointclouds.org/trunk/structpcl_1_1_r_g_b.html#a4ad91ab9726a3580e6dfc734ab77cd18
def read_header(lines_header):
header_info = dict()
def add_line_to_header_dict(header_dict, line, expected_field):
line_parts = line.split(sep=' ')
assert (line_parts[0] == expected_field), \
('Warning: "' + expected_field + '" expected but not found in pcd header!')
header_dict[expected_field] = (' '.join(line_parts[1:])).replace('\n', '')
add_line_to_header_dict(header_info, lines_header[0], '#')
add_line_to_header_dict(header_info, lines_header[1], 'VERSION')
add_line_to_header_dict(header_info, lines_header[2], 'FIELDS')
add_line_to_header_dict(header_info, lines_header[3], 'SIZE')
add_line_to_header_dict(header_info, lines_header[4], 'TYPE')
add_line_to_header_dict(header_info, lines_header[5], 'COUNT')
add_line_to_header_dict(header_info, lines_header[6], 'WIDTH')
add_line_to_header_dict(header_info, lines_header[7], 'HEIGHT')
add_line_to_header_dict(header_info, lines_header[8], 'VIEWPOINT')
add_line_to_header_dict(header_info, lines_header[9], 'POINTS')
add_line_to_header_dict(header_info, lines_header[10], 'DATA')
# TODO: lift limitations
assert header_info['VERSION'] == '0.7'
assert header_info['FIELDS'] == 'x y z rgb label'
assert header_info['SIZE'] == '4 4 4 4 4'
assert header_info['TYPE'] == 'F F F F U'
assert header_info['COUNT'] == '1 1 1 1 1'
# assert header_info['HEIGHT'] == '1'
assert header_info['DATA'] == 'ascii'
# assert header_info['WIDTH'] == header_info['POINTS']
return header_info
f = open(file_in, "r")
f_lines = f.readlines()
f_lines_header = f_lines[:11]
f_lines_points = f_lines[11:]
header_info = read_header(f_lines_header)
header_info['_file_'] = file_in
num_points = int(header_info['POINTS'])
point_data_list_str_ = [l.split(sep=' ')[:3] for l in f_lines_points]
point_data_list = [[float(l[0]), float(l[1]), float(l[2])] for l in point_data_list_str_]
# filter nan points that appear through the blensor kinect sensor
point_data_list = [p for p in point_data_list if
(not np.isnan(p[0]) and not np.isnan(p[1]) and not np.isnan(p[2]))]
point_data = np.array(point_data_list)
f.close()
return point_data, header_info
def get_patch_radius(grid_res, epsilon):
return (1.0 + epsilon) / grid_res
def get_patch_kdtree(
kdtree: spatial.cKDTree, rng: np.random.RandomState,
query_point, patch_radius, points_per_patch, n_jobs):
if patch_radius <= 0.0:
pts_dists_ms, patch_pts_ids = kdtree.query(x=query_point, k=points_per_patch, n_jobs=n_jobs)
else:
patch_pts_ids = kdtree.query_ball_point(x=query_point, r=patch_radius, n_jobs=n_jobs)
patch_pts_ids = np.array(patch_pts_ids, dtype=np.int32)
point_count = patch_pts_ids.shape[0]
# if there are too many neighbors, pick a random subset
if point_count > points_per_patch:
            patch_pts_ids = patch_pts_ids[rng.choice(np.arange(point_count), points_per_patch, replace=False)]
    return patch_pts_ids
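# A hedged usage sketch for get_patch_kdtree (illustrative names, not part
# of the original module): build a cKDTree over a random cloud and extract
# one 50-point patch via the kNN branch (patch_radius <= 0.0). The n_jobs
# keyword matches the older SciPy API used by the function above.
def _demo_get_patch_kdtree():
    rng = np.random.RandomState(42)
    pts = rng.rand(1000, 3).astype(np.float32)
    tree = spatial.cKDTree(pts)
    patch_ids = get_patch_kdtree(kdtree=tree, rng=rng, query_point=pts[0],
                                 patch_radius=0.0, points_per_patch=50, n_jobs=1)
    return pts[patch_ids]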
from environment import *
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import sys
from torch.autograd import Variable
from matplotlib import gridspec
import pickle
import time
import imageio
from IPython.display import Image  # used by RenderWrapper.make_gif
# is_colab() is assumed to be provided by `environment` (star-imported above)
# import or_gym
from or_gym.utils import create_env
# import ray
# from ray.rllib import agents
from ray import tune
# from stable_baselines.common.env_checker import check_env
### Parameters of the model ###
ENV_NAME = 'InvManagement-v1'
env = InvManagementMasterEnv()
EPISODE_DURATION = 300
ALPHA_INIT = 0.1
SCORE = 195.0
NUM_EPISODES = 100
LEFT = 0
RIGHT = 1
VERBOSE = True
# DISCRETE_OS_SIZE = [20, 20]
# discrete_os_win_size = (env.observation_space.high - env.observation_space.low)/DISCRETE_OS_SIZE
### Check environment (if necessary) ###
# If the environment doesn't follow the interface, an error will be thrown
# check_env(env, warn=True)
# #Checking
# env.render()
### RenderWrapper for display ###
class RenderWrapper:
def __init__(self, env, force_gif=False):
self.env = env
self.force_gif = force_gif
self.reset()
def reset(self):
self.images = []
    def render(self):
        # capture a frame for make_gif(); rgb_array support is an assumption
        try:
            self.images.append(self.env.render(mode='rgb_array'))
        except TypeError:
            self.env.render()
        time.sleep(1. / 60.)
def make_gif(self, filename="render"):
if is_colab() or self.force_gif:
imageio.mimsave(filename + '.gif', [np.array(img) for i, img in enumerate(self.images) if i%2 == 0], fps=29)
return Image(open(filename + '.gif','rb').read())
@classmethod
def register(cls, env, force_gif=False):
        env.render_wrapper = cls(env, force_gif=force_gif)
##### Policy Implementation #####
# Constants
GAMMA = 0.9
class PolicyNetwork(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_size, learning_rate=3e-4):
super(PolicyNetwork, self).__init__()
self.num_actions = num_actions
self.linear1 = nn.Linear(num_inputs, hidden_size)
self.linear2 = nn.Linear(hidden_size, num_actions)
self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)
def forward(self, state):
x = F.relu(self.linear1(state))
x = F.softmax(self.linear2(x), dim=1)
return x
def get_action(self, state):
state = torch.from_numpy(state).float().unsqueeze(0)
probs = self.forward(Variable(state))
highest_prob_action = np.random.choice(self.num_actions, 3, p=np.squeeze(probs.detach().numpy()))
# print(highest_prob_action, np.shape(np.squeeze(probs.detach().numpy())), self.num_actions)
# highest_prob_action = np.argmax(np.squeeze(probs.detach().numpy()))
log_prob = torch.log(probs.squeeze(0)[highest_prob_action])
return highest_prob_action, log_prob
### Update Policy ###
def update_policy(policy_network, rewards, log_probs):
discounted_rewards = []
for t in range(len(rewards)):
Gt = 0
pt = 0
for r in rewards[t:]:
Gt = Gt + GAMMA ** pt * r
pt = pt + 1
discounted_rewards.append(Gt)
discounted_rewards = torch.tensor(discounted_rewards)
discounted_rewards = (discounted_rewards - discounted_rewards.mean()) / (
discounted_rewards.std() + 1e-9) # normalize discounted rewards
policy_gradient = []
for log_prob, Gt in zip(log_probs, discounted_rewards):
policy_gradient.append(-log_prob * Gt)
policy_network.optimizer.zero_grad()
policy_gradient = torch.stack(policy_gradient).sum()
policy_gradient.backward()
policy_network.optimizer.step()
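# The nested loop in update_policy recomputes every discounted return from
# scratch, which is O(T^2) in the episode length. A sketch of an equivalent
# O(T) alternative (not the method used above) accumulates returns backwards:
def discounted_returns(rewards, gamma=GAMMA):
    returns = np.zeros(len(rewards))
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + gamma * running
        returns[t] = running
    return returns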
def main(env, distr_name):
policy_net = PolicyNetwork(env.observation_space.shape[0], 100, 128)
max_episode_num = 1000
max_steps = 10000
numsteps = []
avg_numsteps = []
all_rewards = []
results = []
for episode in range(max_episode_num):
state = env.reset()
log_probs = []
rewards = []
actions = []
for steps in range(max_steps):
action, log_prob = policy_net.get_action(state)
new_state, reward, done, _ = env.step(action)
log_probs.append(log_prob)
rewards.append(reward)
if done:
total_reward = np.sum(rewards)
update_policy(policy_net, rewards, log_probs)
numsteps.append(steps)
avg_numsteps.append(np.mean(numsteps[-10:]))
all_rewards.append(total_reward)
avg_reward = np.round(np.mean(all_rewards[-10:]), decimals=3)
results.append(avg_reward)
if max(all_rewards) == total_reward:
print(episode, total_reward)
if episode % 50 == 0:
print("Number of episode: {}, Total reward: {}, Average_reward: {}, length: {}\n".format(
                            episode, np.round(np.sum(all_rewards), decimals=3), avg_reward, steps))
                    break
###
# pySuStaIn: a Python implementation of the Subtype and Stage Inference (SuStaIn) algorithm
#
# If you use pySuStaIn, please cite the following core papers:
# 1. The original SuStaIn paper: https://doi.org/10.1038/s41467-018-05892-0
# 2. The pySuStaIn software paper: https://doi.org/10.1016/j.softx.2021.100811
#
# Please also cite the corresponding progression pattern model you use:
# 1. The piece-wise linear z-score model (i.e. ZscoreSustain): https://doi.org/10.1038/s41467-018-05892-0
# 2. The event-based model (i.e. MixtureSustain): https://doi.org/10.1016/j.neuroimage.2012.01.062
# with Gaussian mixture modeling (i.e. 'mixture_gmm'): https://doi.org/10.1093/brain/awu176
# or kernel density estimation (i.e. 'mixture_kde'): https://doi.org/10.1002/alz.12083
# 3. The model for discrete ordinal data (i.e. OrdinalSustain): https://doi.org/10.3389/frai.2021.613261
#
# Thanks a lot for supporting this project.
#
# Authors: <NAME> (<EMAIL>) and <NAME> (<EMAIL>)
# Contributors: <NAME> (<EMAIL>), <NAME> (<EMAIL>), <NAME> (<EMAIL>)
###
import warnings
from tqdm.auto import tqdm
import numpy as np
import scipy.stats as stats
from matplotlib import pyplot as plt
from pySuStaIn.AbstractSustain import AbstractSustainData
from pySuStaIn.AbstractSustain import AbstractSustain
#*******************************************
#The data structure class for MixtureSustain. It holds the positive/negative likelihoods that get passed around and re-indexed in places.
class MixtureSustainData(AbstractSustainData):
def __init__(self, L_yes, L_no, numStages):
assert(L_yes.shape[0] == L_no.shape[0] and L_yes.shape[1] == L_no.shape[1])
self.L_yes = L_yes
self.L_no = L_no
self.__numStages = numStages
def getNumSamples(self):
return self.L_yes.shape[0]
def getNumBiomarkers(self):
return self.L_no.shape[1]
def getNumStages(self):
return self.__numStages
def reindex(self, index):
return MixtureSustainData(self.L_yes[index,], self.L_no[index,], self.__numStages)
#*******************************************
#An implementation of the AbstractSustain class with mixture model based events
class MixtureSustain(AbstractSustain):
def __init__(self,
L_yes,
L_no,
biomarker_labels,
N_startpoints,
N_S_max,
N_iterations_MCMC,
output_folder,
dataset_name,
use_parallel_startpoints,
seed=None):
# The initializer for the mixture model based events implementation of AbstractSustain
# Parameters:
# L_yes - probability of positive class for all subjects across all biomarkers (from mixture modelling)
# dim: number of subjects x number of biomarkers
# L_no - probability of negative class for all subjects across all biomarkers (from mixture modelling)
# dim: number of subjects x number of biomarkers
# biomarker_labels - the names of the biomarkers as a list of strings
# N_startpoints - number of startpoints to use in maximum likelihood step of SuStaIn, typically 25
# N_S_max - maximum number of subtypes, should be 1 or more
# N_iterations_MCMC - number of MCMC iterations, typically 1e5 or 1e6 but can be lower for debugging
# output_folder - where to save pickle files, etc.
# dataset_name - for naming pickle files
# use_parallel_startpoints - boolean for whether or not to parallelize the maximum likelihood loop
# seed - random number seed
N = L_yes.shape[1] # number of biomarkers
assert (len(biomarker_labels) == N), "number of labels should match number of biomarkers"
self.biomarker_labels = biomarker_labels
numStages = L_yes.shape[1] #number of stages == number of biomarkers here
self.__sustainData = MixtureSustainData(L_yes, L_no, numStages)
super().__init__(self.__sustainData,
N_startpoints,
N_S_max,
N_iterations_MCMC,
output_folder,
dataset_name,
use_parallel_startpoints,
seed)
def _initialise_sequence(self, sustainData, rng):
# Randomly initialises a sequence
S = rng.permutation(sustainData.getNumStages()) #np.random.permutation(sustainData.getNumStages())
S = S.reshape(1, len(S))
return S
def _calculate_likelihood_stage(self, sustainData, S):
'''
Computes the likelihood of a single event based model
Inputs:
=======
sustainData - a MixtureData type that contains:
L_yes - likelihood an event has occurred in each subject
dim: number of subjects x number of biomarkers
L_no - likelihood an event has not occurred in each subject
dim: number of subjects x number of biomarkers
S - the current ordering of the z-score stages for a particular subtype
dim: 1 x number of events
Outputs:
========
p_perm_k - the probability of each subjects data at each stage of a particular subtype
in the SuStaIn model
'''
M = sustainData.getNumSamples()
N = sustainData.getNumStages()
S_int = S.astype(int)
arange_Np1 = np.arange(0, N+1)
p_perm_k = np.zeros((M, N+1))
#**** THIS VERSION IS ROUGHLY 10x FASTER THAN THE ONE BELOW
cp_yes = np.cumprod(sustainData.L_yes[:, S_int], 1)
cp_no = np.cumprod(sustainData.L_no[:, S_int[::-1]], 1) #do the cumulative product from the end of the sequence
# Even faster version to avoid loops
p_perm_k[:, 0] = cp_no[:, -1]
p_perm_k[:, 1:-1] = cp_no[:, :-1][:, ::-1] * cp_yes[:, :-1]
p_perm_k[:, -1] = cp_yes[:, -1]
p_perm_k *= 1 / (N + 1)
return p_perm_k
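    # A hedged sanity check (not part of pySuStaIn): the vectorized cumprod
    # construction above should agree with this naive O(M*N^2) version, which
    # multiplies "event occurred" likelihoods up to stage k and "event not
    # occurred" likelihoods from stage k onwards, under the 1/(N+1) prior.
    @staticmethod
    def _naive_likelihood_stage(L_yes, L_no, S):
        M, N = L_yes.shape
        S_int = S.astype(int)
        p = np.zeros((M, N + 1))
        for k in range(N + 1):
            p[:, k] = (np.prod(L_yes[:, S_int[:k]], axis=1)
                       * np.prod(L_no[:, S_int[k:]], axis=1)) / (N + 1)
        return p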
def _optimise_parameters(self, sustainData, S_init, f_init, rng):
# Optimise the parameters of the SuStaIn model
M = sustainData.getNumSamples()
N_S = S_init.shape[0]
N = sustainData.getNumStages()
S_opt = S_init.copy() # have to copy or changes will be passed to S_init
f_opt = np.array(f_init).reshape(N_S, 1, 1)
f_val_mat = np.tile(f_opt, (1, N + 1, M))
f_val_mat = np.transpose(f_val_mat, (2, 1, 0))
p_perm_k = np.zeros((M, N + 1, N_S))
for s in range(N_S):
p_perm_k[:, :, s] = self._calculate_likelihood_stage(sustainData, S_opt[s])
p_perm_k_weighted = p_perm_k * f_val_mat
# the second summation axis is different to Matlab version
#p_perm_k_norm = p_perm_k_weighted / np.tile(np.sum(np.sum(p_perm_k_weighted, 1), 1).reshape(M, 1, 1), (1, N + 1, N_S))
# adding 1e-250 fixes divide by zero problem that happens rarely
p_perm_k_norm = p_perm_k_weighted / np.sum(p_perm_k_weighted + 1e-250, axis=(1, 2), keepdims=True)
f_opt = (np.squeeze(sum(sum(p_perm_k_norm))) / sum(sum(sum(p_perm_k_norm)))).reshape(N_S, 1, 1)
f_val_mat = np.tile(f_opt, (1, N + 1, M))
f_val_mat = np.transpose(f_val_mat, (2, 1, 0))
order_seq = rng.permutation(N_S) #np.random.permutation(N_S) # this will produce different random numbers to Matlab
for s in order_seq:
order_bio = rng.permutation(N) #np.random.permutation(N) # this will produce different random numbers to Matlab
for i in order_bio:
current_sequence = S_opt[s]
assert(len(current_sequence)==N)
current_location = np.array([0] * N)
current_location[current_sequence.astype(int)] = np.arange(N)
selected_event = i
move_event_from = current_location[selected_event]
possible_positions = np.arange(N)
possible_sequences = np.zeros((len(possible_positions), N))
possible_likelihood = np.zeros((len(possible_positions), 1))
possible_p_perm_k = np.zeros((M, N + 1, len(possible_positions)))
for index in range(len(possible_positions)):
current_sequence = S_opt[s]
#choose a position in the sequence to move an event to
move_event_to = possible_positions[index]
#move this event in its new position
current_sequence = np.delete(current_sequence, move_event_from, 0) # this is different to the Matlab version, which call current_sequence(move_event_from) = []
new_sequence = np.concatenate([current_sequence[np.arange(move_event_to)], [selected_event], current_sequence[np.arange(move_event_to, N - 1)]])
possible_sequences[index, :] = new_sequence
possible_p_perm_k[:, :, index] = self._calculate_likelihood_stage(sustainData, new_sequence)
p_perm_k[:, :, s] = possible_p_perm_k[:, :, index]
total_prob_stage = np.sum(p_perm_k * f_val_mat, 2)
total_prob_subj = np.sum(total_prob_stage, 1)
possible_likelihood[index] = np.sum(np.log(total_prob_subj + 1e-250))
possible_likelihood = possible_likelihood.reshape(possible_likelihood.shape[0])
max_likelihood = max(possible_likelihood)
this_S = possible_sequences[possible_likelihood == max_likelihood, :]
this_S = this_S[0, :]
S_opt[s] = this_S
this_p_perm_k = possible_p_perm_k[:, :, possible_likelihood == max_likelihood]
p_perm_k[:, :, s] = this_p_perm_k[:, :, 0]
p_perm_k_weighted = p_perm_k * f_val_mat
p_perm_k_norm = p_perm_k_weighted / np.tile(np.sum(np.sum(p_perm_k_weighted, 1), 1).reshape(M, 1, 1), (1, N + 1, N_S)) # the second summation axis is different to Matlab version
f_opt = (np.squeeze(sum(sum(p_perm_k_norm))) / sum(sum(sum(p_perm_k_norm)))).reshape(N_S, 1, 1)
f_val_mat = np.tile(f_opt, (1, N + 1, M))
f_val_mat = np.transpose(f_val_mat, (2, 1, 0))
f_opt = f_opt.reshape(N_S)
total_prob_stage = np.sum(p_perm_k * f_val_mat, 2)
total_prob_subj = np.sum(total_prob_stage, 1)
        likelihood_opt = np.sum(np.log(total_prob_subj + 1e-250))
        return S_opt, f_opt, likelihood_opt
import tensorflow as tf
from lingvo import model_imports
from lingvo import model_registry
import numpy as np
import scipy.io.wavfile as wav
import generate_masking_threshold as generate_mask
from tool import Transform, create_features, create_inputs
import time
from lingvo.core import cluster_factory
from absl import flags
from absl import app
# data directory
flags.DEFINE_string("root_dir", "./", "location of Librispeech")
flags.DEFINE_string('input', 'read_data.txt',
'Input audio .wav file(s), at 16KHz (separated by spaces)')
# data processing
flags.DEFINE_integer('window_size', '2048', 'window size in spectrum analysis')
flags.DEFINE_integer('max_length_dataset', '223200',
'the length of the longest audio in the whole dataset')
flags.DEFINE_float('initial_bound', '2000', 'initial l infinity norm for adversarial perturbation')
# training parameters
flags.DEFINE_string('checkpoint', "./model/ckpt-00908156",
'location of checkpoint')
flags.DEFINE_integer('batch_size', '5', 'batch size')
flags.DEFINE_float('lr_stage1', '100', 'learning_rate for stage 1')
flags.DEFINE_float('lr_stage2', '1', 'learning_rate for stage 2')
flags.DEFINE_integer('num_iter_stage1', '1000', 'number of iterations in stage 1')
flags.DEFINE_integer('num_iter_stage2', '4000', 'number of iterations in stage 2')
flags.DEFINE_integer('num_gpu', '0', 'which gpu to run')
FLAGS = flags.FLAGS
def ReadFromWav(data, batch_size):
"""
Returns:
audios_np: a numpy array of size (batch_size, max_length) in float
trans: a numpy array includes the targeted transcriptions (batch_size, )
th_batch: a numpy array of the masking threshold, each of size (?, 1025)
psd_max_batch: a numpy array of the psd_max of the original audio (batch_size)
max_length: the max length of the batch of audios
sample_rate_np: a numpy array
masks: a numpy array of size (batch_size, max_length)
masks_freq: a numpy array of size (batch_size, max_length_freq, 80)
lengths: a list of the length of original audios
"""
audios = []
lengths = []
th_batch = []
psd_max_batch = []
# read the .wav file
for i in range(batch_size):
sample_rate_np, audio_temp = wav.read(FLAGS.root_dir + str(data[0, i]))
        # waveform values range over [-32768, 32767] (int16) or [-1, 1] (float)
if max(audio_temp) < 1:
audio_np = audio_temp * 32768
else:
audio_np = audio_temp
length = len(audio_np)
audios.append(audio_np)
lengths.append(length)
max_length = max(lengths)
# pad the input audio
audios_np = np.zeros([batch_size, max_length])
masks = np.zeros([batch_size, max_length])
lengths_freq = (np.array(lengths) // 2 + 1) // 240 * 3
max_length_freq = max(lengths_freq)
masks_freq = np.zeros([batch_size, max_length_freq, 80])
for i in range(batch_size):
audio_float = audios[i].astype(float)
audios_np[i, :lengths[i]] = audio_float
masks[i, :lengths[i]] = 1
masks_freq[i, :lengths_freq[i], :] = 1
# compute the masking threshold
th, psd_max = generate_mask.generate_th(audios_np[i], sample_rate_np, FLAGS.window_size)
th_batch.append(th)
psd_max_batch.append(psd_max)
th_batch = np.array(th_batch)
psd_max_batch = np.array(psd_max_batch)
# read the transcription
trans = data[2, :]
return audios_np, trans, th_batch, psd_max_batch, max_length, sample_rate_np, masks, masks_freq, lengths
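# A hedged standalone illustration of the padding/mask logic above (the
# lengths here are made up): shorter audios are zero-padded to the batch
# maximum and a binary mask marks which samples are valid.
def _demo_pad_and_mask(lengths=(8, 5), max_length=8):
    masks = np.zeros((len(lengths), max_length))
    for i, n in enumerate(lengths):
        masks[i, :n] = 1
    return masks  # row i has n ones followed by zeros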
class Attack:
def __init__(self, sess, batch_size=1,
lr_stage1=100, lr_stage2=0.1, num_iter_stage1=1000, num_iter_stage2=4000, th=None,
psd_max_ori=None):
self.sess = sess
self.num_iter_stage1 = num_iter_stage1
self.num_iter_stage2 = num_iter_stage2
self.batch_size = batch_size
self.lr_stage1 = lr_stage1
tf.compat.v1.set_random_seed(1234)
params = model_registry.GetParams('asr.librispeech.Librispeech960Wpm', 'Test')
params.random_seed = 1234
params.is_eval = True
params.cluster.worker.gpus_per_replica = 1
cluster = cluster_factory.Cluster(params.cluster)
with cluster, tf.device(cluster.GetPlacer()):
model = params.cls(params)
self.delta_large = tf.Variable(np.zeros((batch_size, FLAGS.max_length_dataset), dtype=np.float32), name='qq_delta')
# placeholders
self.input_tf = tf.compat.v1.placeholder(tf.float32, shape=[batch_size, None], name='qq_input')
self.tgt_tf = tf.compat.v1.placeholder(tf.string)
self.sample_rate_tf = tf.compat.v1.placeholder(tf.int32, name='qq_sample_rate')
self.th = tf.compat.v1.placeholder(tf.float32, shape=[batch_size, None, None], name='qq_th')
self.psd_max_ori = tf.compat.v1.placeholder(tf.float32, shape=[batch_size], name='qq_psd')
self.mask = tf.compat.v1.placeholder(dtype=np.float32, shape=[batch_size, None], name='qq_mask')
self.mask_freq = tf.compat.v1.placeholder(dtype=np.float32, shape=[batch_size, None, 80])
self.noise = tf.compat.v1.placeholder(np.float32, shape=[batch_size, None], name="qq_noise")
self.maxlen = tf.compat.v1.placeholder(np.int32)
self.lr_stage2 = tf.compat.v1.placeholder(np.float32)
# variable
self.rescale = tf.Variable(np.ones((batch_size,1), dtype=np.float32), name='qq_rescale')
self.alpha = tf.Variable(np.ones((batch_size), dtype=np.float32) * 0.05, name='qq_alpha')
# extract the delta
self.delta = tf.slice(tf.identity(self.delta_large), [0, 0], [batch_size, self.maxlen])
self.apply_delta = tf.clip_by_value(self.delta, -FLAGS.initial_bound, FLAGS.initial_bound) * self.rescale
self.new_input = self.apply_delta * self.mask + self.input_tf
self.pass_in = tf.clip_by_value(self.new_input + self.noise, -2**15, 2**15-1)
# generate the inputs that are needed for the lingvo model
self.features = create_features(self.pass_in, self.sample_rate_tf, self.mask_freq)
self.inputs = create_inputs(model, self.features, self.tgt_tf, self.batch_size, self.mask_freq)
task = model.GetTask()
metrics = task.FPropDefaultTheta(self.inputs)
# self.celoss with the shape (batch_size)
self.celoss = tf.compat.v1.get_collection("per_loss")[0]
self.decoded = task.Decode(self.inputs)
# compute the loss for masking threshold
self.loss_th_list = []
self.transform = Transform(FLAGS.window_size)
for i in range(self.batch_size):
logits_delta = self.transform((self.apply_delta[i, :]), (self.psd_max_ori)[i])
loss_th = tf.reduce_mean(input_tensor=tf.nn.relu(logits_delta - (self.th)[i]))
loss_th = tf.expand_dims(loss_th, axis=0)
self.loss_th_list.append(loss_th)
self.loss_th = tf.concat(self.loss_th_list, axis=0)
self.optimizer1 = tf.compat.v1.train.AdamOptimizer(self.lr_stage1)
self.optimizer2 = tf.compat.v1.train.AdamOptimizer(self.lr_stage2)
grad1, var1 = self.optimizer1.compute_gradients(self.celoss, [self.delta_large])[0]
grad21, var21 = self.optimizer2.compute_gradients(self.celoss, [self.delta_large])[0]
grad22, var22 = self.optimizer2.compute_gradients(self.alpha * self.loss_th, [self.delta_large])[0]
self.train1 = self.optimizer1.apply_gradients([(tf.sign(grad1), var1)])
self.train21 = self.optimizer2.apply_gradients([(grad21, var21)])
self.train22 = self.optimizer2.apply_gradients([(grad22, var22)])
self.train2 = tf.group(self.train21, self.train22)
def attack_stage1(self, audios, trans, th_batch, psd_max_batch, maxlen, sample_rate, masks, masks_freq, num_loop, data, lr_stage2):
sess = self.sess
# initialize and load the pretrained model
sess.run(tf.compat.v1.initializers.global_variables())
saver = tf.compat.v1.train.Saver([x for x in tf.compat.v1.global_variables() if x.name.startswith("librispeech")])
saver.restore(sess, FLAGS.checkpoint)
# reassign the variables
sess.run(tf.compat.v1.assign(self.rescale, np.ones((self.batch_size, 1), dtype=np.float32)))
sess.run(tf.compat.v1.assign(self.delta_large, np.zeros((self.batch_size, FLAGS.max_length_dataset), dtype=np.float32)))
#noise = np.random.normal(scale=2, size=audios.shape)
        noise = np.zeros(audios.shape)
"""
Matching pennies environment.
"""
import gym
import numpy as np
from gym.spaces import Discrete, Tuple
from .common import OneHot
class IteratedMatchingPennies(gym.Env):
"""
A two-agent vectorized environment for the Matching Pennies game.
"""
NAME = 'IMP'
NUM_AGENTS = 2
NUM_ACTIONS = 2
NUM_STATES = 5
def __init__(self, max_steps):
self.max_steps = max_steps
self.payout_mat = np.array([[1, -1],[-1, 1]])
self.action_space = \
Tuple([Discrete(self.NUM_ACTIONS), Discrete(self.NUM_ACTIONS)])
self.observation_space = \
Tuple([OneHot(self.NUM_STATES), OneHot(self.NUM_STATES)])
self.step_count = None
def reset(self):
self.step_count = 0
        init_state = np.zeros(self.NUM_STATES)
        init_state[-1] = 1  # one-hot "start" state: no joint action taken yet (an assumption)
        observations = [init_state, init_state]
        return observations
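    # A hedged sketch of the step method (the original file is cut off here).
    # It assumes LOLA-style conventions consistent with reset(): one-hot
    # joint-action states, per-agent observation/reward lists, and agent 0
    # rewarded on a match (the sign convention is an assumption).
    def step(self, action):
        ac0, ac1 = action
        self.step_count += 1
        r0 = self.payout_mat[ac0][ac1]  # +1 on a match, -1 otherwise
        rewards = [r0, -r0]             # zero-sum game
        state = np.zeros(self.NUM_STATES)
        state[ac0 * 2 + ac1] = 1        # one-hot of the joint action
        observations = [state, state]
        done = (self.step_count == self.max_steps)
        return observations, rewards, done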
# -*- coding: utf-8 -*-
# Copyright 2020 the HERA Project
# Licensed under the MIT License
"""
abscal.py
---------
Calibrate measured visibility
data to a visibility model using
linearizations of the (complex)
antenna-based calibration equation:
V_ij,xy^data = g_i_x * conj(g_j_y) * V_ij,xy^model.
Complex-valued parameters are broken into amplitudes and phases as:
V_ij,xy^model = exp(eta_ij,xy^model + i * phi_ij,xy^model)
g_i_x = exp(eta_i_x + i * phi_i_x)
g_j_y = exp(eta_j_y + i * phi_j_y)
V_ij,xy^data = exp(eta_ij,xy^data + i * phi_ij,xy^data)
where {i,j} index antennas and {x,y} are the polarization of
the i-th and j-th antenna respectively.
"""
import os
from collections import OrderedDict as odict
import copy
import argparse
import numpy as np
import operator
from functools import reduce
from scipy import signal, interpolate, spatial
from scipy.optimize import brute, minimize
from pyuvdata import UVCal, UVData
import linsolve
import warnings
from . import version
from .apply_cal import calibrate_in_place
from .smooth_cal import pick_reference_antenna, rephase_to_refant
from .flag_utils import synthesize_ant_flags
from .noise import predict_noise_variance_from_autos
from . import utils
from . import redcal
from . import io
from . import apply_cal
from .datacontainer import DataContainer
from .utils import echo, polnum2str, polstr2num, reverse_bl, split_pol, split_bl, join_bl, join_pol
PHASE_SLOPE_SOLVERS = ['linfit', 'dft', 'ndim_fft'] # list of valid solvers for global_phase_slope_logcal
def abs_amp_logcal(model, data, wgts=None, verbose=True, return_gains=False, gain_ants=[]):
"""
calculate absolute (array-wide) gain amplitude scalar
with a linear solver using the logarithmically linearized equation:
ln|V_ij,xy^data / V_ij,xy^model| = eta_x + eta_y
where {i,j} index antenna numbers and {x,y} index polarizations
of the i-th and j-th antennas respectively.
Parameters:
-----------
    model : visibility data of reference model, type=DataContainer
keys are antenna-pair + polarization tuples, Ex. (1, 2, 'nn').
values are complex ndarray visibilities.
these must be 2D arrays, with [0] axis indexing time
and [1] axis indexing frequency.
data : visibility data of measurements, type=DataContainer
keys are antenna pair + pol tuples (must match model), values are
complex ndarray visibilities matching shape of model
wgts : weights of data, type=DataContainer, [default=None]
keys are antenna pair + pol tuples (must match model), values are real floats
matching shape of model and data
return_gains : boolean. If True, convert result into a dictionary of gain waterfalls.
gain_ants : list of ant-pol tuples for return_gains dictionary
verbose : print output, type=boolean, [default=False]
Output:
-------
if not return_gains:
fit : dictionary with 'eta_{}' key for amplitude scalar for {} polarization,
which has the same shape as the ndarrays in the model
else:
gains: dictionary with gain_ants as keys and gain waterfall arrays as values
"""
echo("...configuring linsolve data for abs_amp_logcal", verbose=verbose)
# get keys from model and data dictionary
keys = sorted(set(model.keys()) & set(data.keys()))
# abs of amplitude ratio is ydata independent variable
ydata = odict([(k, np.log(np.abs(data[k] / model[k]))) for k in keys])
# make weights if None
if wgts is None:
wgts = odict()
for i, k in enumerate(keys):
            wgts[k] = np.ones_like(ydata[k], dtype=float)
# fill nans and infs
fill_dict_nans(ydata, wgts=wgts, nan_fill=0.0, inf_fill=0.0)
# setup linsolve equations
# a{} is a dummy variable to prevent linsolve from overwriting repeated measurements
eqns = odict([(k, "a{}*eta_{}+a{}*eta_{}".format(i, split_pol(k[-1])[0],
i, split_pol(k[-1])[1])) for i, k in enumerate(keys)])
ls_design_matrix = odict([("a{}".format(i), 1.0) for i, k in enumerate(keys)])
# setup linsolve dictionaries
ls_data = odict([(eqns[k], ydata[k]) for i, k in enumerate(keys)])
ls_wgts = odict([(eqns[k], wgts[k]) for i, k in enumerate(keys)])
# setup linsolve and run
sol = linsolve.LinearSolver(ls_data, wgts=ls_wgts, **ls_design_matrix)
echo("...running linsolve", verbose=verbose)
fit = sol.solve()
echo("...finished linsolve", verbose=verbose)
if not return_gains:
return fit
else:
        return {ant: np.exp(fit['eta_{}'.format(ant[1])]).astype(complex) for ant in gain_ants}
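# A hedged toy check for abs_amp_logcal (not part of hera_cal): plain dicts
# stand in for DataContainers, which suffices here because only .keys() and
# item access are used. With data = 4 * model, every baseline carries a
# factor |g|^2 = 4, so the recovered eta should be ln(2) everywhere. The
# exact output key (e.g. 'eta_Jnn') follows hera_cal's split_pol convention.
def _demo_abs_amp_logcal():
    model = {(0, 1, 'nn'): np.ones((2, 3), dtype=np.complex128)}
    data = {k: 4.0 * v for k, v in model.items()}
    return abs_amp_logcal(model, data, verbose=False)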
def abs_amp_lincal(model, data, wgts=None, verbose=True, return_gains=False, gain_ants=[],
conv_crit=None, maxiter=100):
"""
calculate absolute (array-wide) gain amplitude scalar
with a linear (or linearized) solver using the equation:
V_ij,xy^data = A_x A_y * V_ij,xy^model
where {i,j} index antenna numbers and {x,y} index polarizations
of the i-th and j-th antennas respectively. When no cross-polarized
visibilities are involved, A^2 is solved for linearly for both real
and imaginary parts simultaneously as separate equations. Otherwise,
we have to use a linear-product solving algorithm, using abs_amp_logcal
as a starting point.
Parameters:
-----------
    model : visibility data of reference model, type=DataContainer
keys are antenna-pair + polarization tuples, Ex. (1, 2, 'nn').
values are complex ndarray visibilities.
these must be 2D arrays, with [0] axis indexing time
and [1] axis indexing frequency.
data : visibility data of measurements, type=DataContainer
keys are antenna pair + pol tuples (must match model), values are
complex ndarray visibilities matching shape of model
wgts : weights of data, type=DataContainer, [default=None]
keys are antenna pair + pol tuples (must match model), values are real floats
matching shape of model and data
return_gains : boolean. If True, convert result into a dictionary of gain waterfalls.
gain_ants : list of ant-pol tuples for return_gains dictionary
conv_crit : A convergence criterion below which to stop iterating LinProductSolver.
Converegence is measured L2-norm of the change in the solution of the
variables divided by the L2-norm of the solution itself.
Default: None (resolves to machine precision for inferred dtype).
Note: only used when data and model include cross-polarized visibilities.
maxiter : Integer maximum number of iterations to perform LinProductSolver.
Note: only used when data and model include cross-polarized visibilities.
verbose : print output, type=boolean, [default=False]
Output:
-------
if not return_gains:
fit : dictionary with 'A_{}' key for amplitude scalar for {} polarization,
which has the same shape as the ndarrays in the model
else:
gains: dictionary with gain_ants as keys and gain waterfall arrays as values
"""
echo("...configuring linsolve data for abs_amp_lincal", verbose=verbose)
# get keys from model and data dictionary
keys = sorted(set(model.keys()) & set(data.keys()))
# check to see whether any cross-polarizations are being used (this will require a different solver)
cross_pols_used = False
for k in keys:
ant0, ant1 = split_bl(k)
if ant0[1] != ant1[1]:
cross_pols_used = True
break
# make weights if None
if wgts is None:
wgts = odict()
for i, k in enumerate(keys):
            wgts[k] = np.ones_like(data[k], dtype=float)
# fill nans and infs, minimally duplicating data to save memory
data_here = {}
model_here = {}
for k in keys:
if np.any(~np.isfinite(data[k])):
data_here[k] = copy.deepcopy(data[k])
fill_dict_nans(data_here[k], wgts=wgts[k], nan_fill=0.0, inf_fill=0.0, array=True)
else:
data_here[k] = data[k]
if np.any(~np.isfinite(model[k])):
model_here[k] = copy.deepcopy(model[k])
fill_dict_nans(model_here[k], wgts=wgts[k], nan_fill=0.0, inf_fill=0.0, array=True)
else:
model_here[k] = model[k]
# setup linsolve equations, either for A (if cross_pols_used) or A^2
ls_data = {}
ls_wgts = {}
ls_consts = {}
for i, k in enumerate(keys):
pol0, pol1 = split_pol(k[-1])
if cross_pols_used:
re_eq_str = f'model_re_{i}*A_{pol0}*A_{pol1}'
im_eq_str = f'model_im_{i}*A_{pol0}*A_{pol1}'
else:
re_eq_str = f'model_re_{i}*Asq_{pol0}'
im_eq_str = f'model_im_{i}*Asq_{pol0}'
ls_data[re_eq_str] = np.real(data_here[k])
ls_wgts[re_eq_str] = wgts[k]
ls_consts[f'model_re_{i}'] = np.real(model_here[k])
ls_data[im_eq_str] = np.imag(data_here[k])
ls_wgts[im_eq_str] = wgts[k]
ls_consts[f'model_im_{i}'] = np.imag(model_here[k])
# setup linsolve and run
echo("...running linsolve", verbose=verbose)
if cross_pols_used:
# use abs_amp_logcal to get a starting point solution
sol0 = abs_amp_logcal(model, data, wgts=wgts)
sol0 = {k.replace('eta_', 'A_'): np.exp(sol) for k, sol in sol0.items()}
# now solve by linearizing
solver = linsolve.LinProductSolver(ls_data, sol0, wgts=ls_wgts, constants=ls_consts)
meta, fit = solver.solve_iteratively(conv_crit=conv_crit, maxiter=maxiter)
else:
# in this case, the equations are already linear in A^2
solver = linsolve.LinearSolver(ls_data, wgts=ls_wgts, constants=ls_consts)
fit = solver.solve()
fit = {k.replace('Asq', 'A'): np.sqrt(np.abs(sol)) for k, sol in fit.items()}
echo("...finished linsolve", verbose=verbose)
if not return_gains:
return fit
else:
        return {ant: np.abs(fit[f'A_{ant[1]}']).astype(complex) for ant in gain_ants}
def _count_nDims(antpos, assume_2D=True):
'''Antenna position dimension counter helper function used in solvers that support higher-dim abscal.'''
nDims = len(list(antpos.values())[0])
for k in antpos.keys():
assert len(antpos[k]) == nDims, 'Not all antenna positions have the same dimensionality.'
if assume_2D:
assert len(antpos[k]) >= 2, 'Since assume_2D is True, all antenna positions must 2D or higher.'
return nDims
def TT_phs_logcal(model, data, antpos, wgts=None, refant=None, assume_2D=True,
zero_psi=True, four_pol=False, verbose=True, return_gains=False, gain_ants=[]):
"""
calculate overall gain phase and gain phase Tip-Tilt slopes (East-West and North-South)
with a linear solver applied to the logarithmically linearized equation:
angle(V_ij,xy^data / V_ij,xy^model) = angle(g_i_x * conj(g_j_y))
= psi_x - psi_y + Phi^ew_x*r_i^ew + Phi^ns_x*r_i^ns
- Phi^ew_y*r_j^ew - Phi^ns_y*r_j^ns
where psi is the overall gain phase across the array [radians] for x and y polarizations,
and PHI^ew, PHI^ns are the gain phase slopes across the east-west and north-south axes
of the array in units of [radians / meter], where x and y denote the pol of the i-th and j-th
antenna respectively. The phase slopes are polarization independent by default (1pol & 2pol cal),
but can be merged with the four_pol parameter (4pol cal). r_i is the antenna position vector
of the i^th antenna.
If assume_2D is not true, this solves for the tip-tilt degeneracies of antenna positions in an
arbitary number of dimensions, the output of redcal.reds_to_antpos() for an array with extra
tip-tilt degeneracies. In that case, the fit parameters are Phi_0, Phi_1, Phi_2, etc.,
generalizing the equation above to use the n-dimensional dot product Phi . r.
Parameters:
-----------
    model : visibility data of reference model, type=DataContainer
keys are antenna-pair + polarization tuples, Ex. (1, 2, 'nn').
values are complex ndarray visibilities.
        these must be 2D arrays, with [0] axis indexing time
and [1] axis indexing frequency.
data : visibility data of measurements, type=DataContainer
keys are antenna pair + pol tuples (must match model), values are
complex ndarray visibilities matching shape of model
wgts : weights of data, type=DataContainer, [default=None]
keys are antenna pair + pol tuples (must match model), values are real floats
matching shape of model and data
refant : antenna number integer to use as a reference,
        The antenna position coordinates are centered at the reference, such that its phase
is identically zero across all frequencies. If None, use the first key in data as refant.
antpos : antenna position vectors, type=dictionary
keys are antenna integers, values are antenna positions vectors
(preferably centered at center of array). If assume_2D is True, it is assumed that the
[0] index contains the east-west separation and [1] index the north-south separation
    assume_2D : type=boolean, [default=True]
If this is true, all dimensions of antpos beyond the first two will be ignored.
If return_gains is False and assume_2D is False, then the returned variables will
look like Phi_0, Phi_1, Phi_2, etc. corresponding to the dimensions in antpos.
    zero_psi : set psi to be identically zero in linsolve eqns, type=boolean, [default=True]
four_pol : type=boolean, even if multiple polarizations are present in data, make free
variables polarization un-aware: i.e. one solution across all polarizations.
This is the same assumption as 4-polarization calibration in omnical.
verbose : print output, type=boolean, [default=False]
return_gains : boolean. If True, convert result into a dictionary of gain waterfalls.
gain_ants : list of ant-pol tuples for return_gains dictionary
Output:
-------
if not return_gains:
fit : dictionary with psi key for overall gain phase and Phi_ew and Phi_ns array containing
phase slopes across the EW and NS directions of the array. There is a set of each
of these variables per polarization. If assume_2D is False, then these will be the
more general Phi_0, Phi_1, Phi_2, etc. corresponding to the dimensions in antpos.
else:
gains: dictionary with gain_ants as keys and gain waterfall arrays as values
"""
echo("...configuring linsolve data for TT_phs_logcal", verbose=verbose)
# get keys from model dictionary
keys = sorted(set(model.keys()) & set(data.keys()))
antnums = np.unique(list(antpos.keys()))
# angle of phs ratio is ydata independent variable
# angle after divide
ydata = {k: np.angle(data[k] / model[k]) for k in keys}
# make unit weights if None
if wgts is None:
        wgts = {k: np.ones_like(ydata[k], dtype=float) for k in keys}
# fill nans and infs
fill_dict_nans(ydata, wgts=wgts, nan_fill=0.0, inf_fill=0.0)
# center antenna positions about the reference antenna
if refant is None:
refant = keys[0][0]
assert refant in antnums, "reference antenna {} not found in antenna list".format(refant)
antpos = {k: antpos[k] - antpos[refant] for k in antpos.keys()}
# count dimensions of antenna positions, figure out how many to solve for
nDims = _count_nDims(antpos, assume_2D=assume_2D)
# setup linsolve equations
eqns = {}
for k in keys:
ap0, ap1 = split_pol(k[2])
eqns[k] = f'psi_{ap0}*a1 - psi_{ap1}*a2'
for d in range((nDims, 2)[assume_2D]):
if four_pol:
eqns[k] += f' + Phi_{d}*r_{d}_{k[0]} - Phi_{d}*r_{d}_{k[1]}'
else:
eqns[k] += f' + Phi_{d}_{ap0}*r_{d}_{k[0]} - Phi_{d}_{ap1}*r_{d}_{k[1]}'
# set design matrix entries
ls_design_matrix = {}
for a in antnums:
for d in range((nDims, 2)[assume_2D]):
ls_design_matrix[f'r_{d}_{a}'] = antpos[a][d]
if zero_psi:
ls_design_matrix.update({"a1": 0.0, "a2": 0.0})
else:
ls_design_matrix.update({"a1": 1.0, "a2": 1.0})
# setup linsolve dictionaries
ls_data = {eqns[k]: ydata[k] for k in keys}
ls_wgts = {eqns[k]: wgts[k] for k in keys}
# setup linsolve and run
sol = linsolve.LinearSolver(ls_data, wgts=ls_wgts, **ls_design_matrix)
echo("...running linsolve", verbose=verbose)
fit = sol.solve()
echo("...finished linsolve", verbose=verbose)
if not return_gains:
# rename variables ew/ns instead of 0/1 to maintain backwards compatability
if assume_2D:
params = list(fit.keys())
for p in params:
if 'Phi_0' in p:
fit[p.replace('Phi_0', 'Phi_ew')] = fit[p]
del fit[p]
if 'Phi_1' in p:
fit[p.replace('Phi_1', 'Phi_ns')] = fit[p]
del fit[p]
return fit
else:
# compute gains, dotting each parameter into the corresponding coordinate in that dimension
gains = {}
for ant in gain_ants:
gains[ant] = np.exp(1.0j * fit['psi_{}'.format(ant[1])])
if four_pol:
Phis = [fit[f'Phi_{d}'] for d in range((nDims, 2)[assume_2D])]
else:
Phis = [fit[f'Phi_{d}_{ant[1]}'] for d in range((nDims, 2)[assume_2D])]
gains[ant] *= np.exp(1.0j * (np.einsum('i,ijk->jk', antpos[ant[0]][0:len(Phis)], Phis)))
return gains
def amp_logcal(model, data, wgts=None, verbose=True):
"""
calculate per-antenna gain amplitude via the
logarithmically linearized equation
ln|V_ij,xy^data / V_ij,xy^model| = ln|g_i_x| + ln|g_j_y|
= eta_i_x + eta_j_y
where {x,y} represent the polarization of the i-th and j-th antenna
respectively.
Parameters:
-----------
    model : visibility data of reference model, type=DataContainer
keys are antenna-pair + polarization tuples, Ex. (1, 2, 'nn').
values are complex ndarray visibilities.
        these must be 2D arrays, with [0] axis indexing time
and [1] axis indexing frequency.
data : visibility data of measurements, type=DataContainer
keys are antenna pair + pol tuples (must match model), values are
complex ndarray visibilities matching shape of model
wgts : weights of data, type=DataContainer, [default=None]
keys are antenna pair + pol tuples (must match model), values are real floats
matching shape of model and data
Output:
-------
fit : dictionary containing eta_i = ln|g_i| for each antenna
"""
echo("...configuring linsolve data for amp_logcal", verbose=verbose)
# get keys from model dictionary
keys = sorted(set(model.keys()) & set(data.keys()))
# difference of log-amplitudes is ydata independent variable
ydata = odict([(k, np.log(np.abs(data[k] / model[k]))) for k in keys])
# make weights if None
if wgts is None:
wgts = odict()
for i, k in enumerate(keys):
            wgts[k] = np.ones_like(ydata[k], dtype=float)
# fill nans and infs
fill_dict_nans(ydata, wgts=wgts, nan_fill=0.0, inf_fill=0.0)
# setup linsolve equations
eqns = odict([(k, "eta_{}_{} + eta_{}_{}".format(k[0], split_pol(k[-1])[0],
k[1], split_pol(k[-1])[1])) for i, k in enumerate(keys)])
ls_design_matrix = odict()
# setup linsolve dictionaries
ls_data = odict([(eqns[k], ydata[k]) for i, k in enumerate(keys)])
ls_wgts = odict([(eqns[k], wgts[k]) for i, k in enumerate(keys)])
# setup linsolve and run
sol = linsolve.LinearSolver(ls_data, wgts=ls_wgts, **ls_design_matrix)
echo("...running linsolve", verbose=verbose)
fit = sol.solve()
echo("...finished linsolve", verbose=verbose)
return fit
def phs_logcal(model, data, wgts=None, refant=None, verbose=True):
"""
calculate per-antenna gain phase via the
logarithmically linearized equation
angle(V_ij,xy^data / V_ij,xy^model) = angle(g_i_x) - angle(g_j_y)
= phi_i_x - phi_j_y
where {x,y} represent the pol of the i-th and j-th antenna respectively.
Parameters:
-----------
    model : visibility data of reference model, type=DataContainer
keys are antenna-pair + polarization tuples, Ex. (1, 2, 'nn').
values are complex ndarray visibilities.
        these must be 2D arrays, with [0] axis indexing time
and [1] axis indexing frequency.
data : visibility data of measurements, type=DataContainer
keys are antenna pair + pol tuples (must match model), values are
complex ndarray visibilities matching shape of model
wgts : weights of data, type=DataContainer, [default=None]
keys are antenna pair + pol tuples (must match model), values are real floats
matching shape of model and data
    refant : integer antenna number of reference antenna, default=None
The refant phase will be set to identically zero in the linear equations.
By default this takes the first antenna in data.
Output:
-------
fit : dictionary containing phi_i = angle(g_i) for each antenna
"""
echo("...configuring linsolve data for phs_logcal", verbose=verbose)
# get keys from match between data and model dictionary
keys = sorted(set(model.keys()) & set(data.keys()))
# angle of visibility ratio is ydata independent variable
ydata = odict([(k, np.angle(data[k] / model[k])) for k in keys])
# make weights if None
if wgts is None:
wgts = odict()
for i, k in enumerate(keys):
            wgts[k] = np.ones_like(ydata[k], dtype=float)
# fill nans and infs
fill_dict_nans(ydata, wgts=wgts, nan_fill=0.0, inf_fill=0.0)
# setup linsolve equations
eqns = odict([(k, "phi_{}_{} - phi_{}_{}".format(k[0], split_pol(k[2])[0],
k[1], split_pol(k[2])[1])) for i, k in enumerate(keys)])
ls_design_matrix = odict()
# setup linsolve dictionaries
ls_data = odict([(eqns[k], ydata[k]) for i, k in enumerate(keys)])
ls_wgts = odict([(eqns[k], wgts[k]) for i, k in enumerate(keys)])
# get unique gain polarizations
gain_pols = np.unique(list(map(lambda k: list(split_pol(k[2])), keys)))
# set reference antenna phase to zero
if refant is None:
refant = keys[0][0]
assert np.array(list(map(lambda k: refant in k, keys))).any(), "refant {} not found in data and model".format(refant)
for p in gain_pols:
ls_data['phi_{}_{}'.format(refant, p)] = np.zeros_like(list(ydata.values())[0])
ls_wgts['phi_{}_{}'.format(refant, p)] = np.ones_like(list(wgts.values())[0])
# setup linsolve and run
sol = linsolve.LinearSolver(ls_data, wgts=ls_wgts, **ls_design_matrix)
echo("...running linsolve", verbose=verbose)
fit = sol.solve()
echo("...finished linsolve", verbose=verbose)
return fit
def delay_lincal(model, data, wgts=None, refant=None, df=9.765625e4, f0=0., solve_offsets=True, medfilt=True,
kernel=(1, 5), verbose=True, antpos=None, four_pol=False, edge_cut=0):
"""
Solve for per-antenna delays according to the equation
delay(V_ij,xy^data / V_ij,xy^model) = delay(g_i_x) - delay(g_j_y)
Can also solve for per-antenna phase offsets with the solve_offsets kwarg.
Parameters:
-----------
    model : visibility data of reference model, type=DataContainer
keys are antenna-pair + polarization tuples, Ex. (1, 2, 'nn').
values are complex ndarray visibilities.
        these must be 2D arrays, with [0] axis indexing time
and [1] axis indexing frequency.
data : visibility data of measurements, type=DataContainer
keys are antenna pair + pol tuples (must match model), values are
complex ndarray visibilities matching shape of model
wgts : weights of data, type=DataContainer, [default=None]
keys are antenna pair + pol tuples (must match model), values are real floats
matching shape of model and data. These are only used to find delays from
itegrations that are unflagged for at least two frequency bins. In this case,
the delays are assumed to have equal weight, otherwise the delays take zero weight.
refant : antenna number integer to use as reference
Set the reference antenna to have zero delay, such that its phase is set to identically
zero across all freqs. By default use the first key in data.
df : type=float, frequency spacing between channels in Hz
f0 : type=float, frequency of the first channel in the data (used for offsets)
    medfilt : type=boolean, median filter visibility ratio before taking fft
kernel : type=tuple, dtype=int, kernel for multi-dimensional median filter
antpos : type=dictionary, antpos dictionary. antenna num as key, position vector as value.
four_pol : type=boolean, if True, fit multiple polarizations together
edge_cut : int, number of channels to exclude at each band edge in FFT window
Output:
-------
fit : dictionary containing delay (tau_i_x) for each antenna and optionally
offset (phi_i_x) for each antenna.
"""
echo("...configuring linsolve data for delay_lincal", verbose=verbose)
# get shared keys
keys = sorted(set(model.keys()) & set(data.keys()))
# make wgts
if wgts is None:
wgts = odict()
for i, k in enumerate(keys):
            wgts[k] = np.ones_like(data[k], dtype=float)
# median filter and FFT to get delays
ratio_delays = []
ratio_offsets = []
ratio_wgts = []
for i, k in enumerate(keys):
ratio = data[k] / model[k]
# replace nans
nan_select = np.isnan(ratio)
ratio[nan_select] = 0.0
wgts[k][nan_select] = 0.0
# replace infs
inf_select = np.isinf(ratio)
ratio[inf_select] = 0.0
wgts[k][inf_select] = 0.0
# get delays
dly, offset = utils.fft_dly(ratio, df, f0=f0, wgts=wgts[k], medfilt=medfilt, kernel=kernel, edge_cut=edge_cut)
# set nans to zero
rwgts = np.nanmean(wgts[k], axis=1, keepdims=True)
isnan = np.isnan(dly)
dly[isnan] = 0.0
rwgts[isnan] = 0.0
offset[isnan] = 0.0
ratio_delays.append(dly)
ratio_offsets.append(offset)
ratio_wgts.append(rwgts)
ratio_delays = np.array(ratio_delays)
ratio_offsets = np.array(ratio_offsets)
ratio_wgts = np.array(ratio_wgts)
# form ydata
ydata = odict(zip(keys, ratio_delays))
# form wgts
ywgts = odict(zip(keys, ratio_wgts))
# setup linsolve equation dictionary
eqns = odict([(k, 'tau_{}_{} - tau_{}_{}'.format(k[0], split_pol(k[2])[0],
k[1], split_pol(k[2])[1])) for i, k in enumerate(keys)])
# setup design matrix dictionary
ls_design_matrix = odict()
# setup linsolve data dictionary
ls_data = odict([(eqns[k], ydata[k]) for i, k in enumerate(keys)])
ls_wgts = odict([(eqns[k], ywgts[k]) for i, k in enumerate(keys)])
# get unique gain polarizations
gain_pols = np.unique(list(map(lambda k: [split_pol(k[2])[0], split_pol(k[2])[1]], keys)))
# set reference antenna phase to zero
if refant is None:
refant = keys[0][0]
assert np.array(list(map(lambda k: refant in k, keys))).any(), "refant {} not found in data and model".format(refant)
for p in gain_pols:
ls_data['tau_{}_{}'.format(refant, p)] = np.zeros_like(list(ydata.values())[0])
ls_wgts['tau_{}_{}'.format(refant, p)] = np.ones_like(list(ywgts.values())[0])
# setup linsolve and run
sol = linsolve.LinearSolver(ls_data, wgts=ls_wgts, **ls_design_matrix)
echo("...running linsolve", verbose=verbose)
fit = sol.solve()
echo("...finished linsolve", verbose=verbose)
# setup linsolve parameters
ydata = odict(zip(keys, ratio_offsets))
eqns = odict([(k, 'phi_{}_{} - phi_{}_{}'.format(k[0], split_pol(k[2])[0],
k[1], split_pol(k[2])[1])) for i, k in enumerate(keys)])
ls_data = odict([(eqns[k], ydata[k]) for i, k in enumerate(keys)])
ls_wgts = odict([(eqns[k], ywgts[k]) for i, k in enumerate(keys)])
ls_design_matrix = odict()
for p in gain_pols:
ls_data['phi_{}_{}'.format(refant, p)] = np.zeros_like(list(ydata.values())[0])
ls_wgts['phi_{}_{}'.format(refant, p)] = np.ones_like(list(ywgts.values())[0])
sol = linsolve.LinearSolver(ls_data, wgts=ls_wgts, **ls_design_matrix)
echo("...running linsolve", verbose=verbose)
offset_fit = sol.solve()
echo("...finished linsolve", verbose=verbose)
fit.update(offset_fit)
return fit
def delay_slope_lincal(model, data, antpos, wgts=None, refant=None, df=9.765625e4, f0=0.0, medfilt=True,
kernel=(1, 5), assume_2D=True, four_pol=False, edge_cut=0, time_avg=False,
return_gains=False, gain_ants=[], verbose=True):
"""
Solve for an array-wide delay slope according to the equation
delay(V_ij,xy^data / V_ij,xy^model) = dot(T_x, r_i) - dot(T_y, r_j)
This does not solve for per-antenna delays, but rather a delay slope across the array.
Parameters:
-----------
    model : visibility data of reference model, type=DataContainer
keys are antenna-pair + polarization tuples, Ex. (1, 2, 'nn').
values are complex ndarray visibilities.
        these must be 2D arrays, with [0] axis indexing time
and [1] axis indexing frequency.
data : visibility data of measurements, type=DataContainer
keys are antenna pair + pol tuples (must match model), values are
complex ndarray visibilities matching shape of model
antpos : type=dictionary, antpos dictionary. antenna num as key, position vector as value.
wgts : weights of data, type=DataContainer, [default=None]
keys are antenna pair + pol tuples (must match model), values are real floats
matching shape of model and data. These are only used to find delays from
        integrations that are unflagged for at least two frequency bins. In this case,
the delays are assumed to have equal weight, otherwise the delays take zero weight.
refant : antenna number integer to use as a reference,
        The antenna position coordinates are centered at the reference, such that its phase
is identically zero across all frequencies. If None, use the first key in data as refant.
df : type=float, frequency spacing between channels in Hz
f0 : type=float, frequency of 0th channel in Hz.
Optional, but used to get gains without a delay offset.
    medfilt : type=boolean, median filter visibility ratio before taking fft
kernel : type=tuple, dtype=int, kernel for multi-dimensional median filter
    assume_2D : type=boolean, [default=True]
If this is true, all dimensions of antpos beyond the first two will be ignored.
If return_gains is False and assume_2D is False, then the returned variables will
look like T_0, T_1, T_2, etc. corresponding to the dimensions in antpos.
four_pol : type=boolean, if True, fit multiple polarizations together
edge_cut : int, number of channels to exclude at each band edge of vis in FFT window
time_avg : boolean, if True, replace resultant antenna delay slope with the median across time
return_gains : boolean. If True, convert result into a dictionary of gain waterfalls.
gain_ants : list of ant-pol tuples for return_gains dictionary
Output:
-------
if not return_gains:
fit : dictionary containing delay slope (T_x) for each pol [seconds / meter].
If assume_2D is False, then these will be the more general T_0, T_1, T_2, etc.
corresponding to the dimensions in antpos, instead of T_ew or T_ns.
else:
gains: dictionary with gain_ants as keys and gain waterfall arrays as values
"""
echo("...configuring linsolve data for delay_slope_lincal", verbose=verbose)
# get shared keys
keys = sorted(set(model.keys()) & set(data.keys()))
antnums = np.unique(list(antpos.keys()))
# make unit wgts if None
if wgts is None:
        wgts = {k: np.ones_like(data[k], dtype=float) for k in keys}
# center antenna positions about the reference antenna
if refant is None:
refant = keys[0][0]
assert refant in antnums, "reference antenna {} not found in antenna list".format(refant)
antpos = {k: antpos[k] - antpos[refant] for k in antpos.keys()}
# count dimensions of antenna positions, figure out how many to solve for
nDims = _count_nDims(antpos, assume_2D=assume_2D)
# median filter and FFT to get delays
ydata = {}
ywgts = {}
for i, k in enumerate(keys):
ratio = data[k] / model[k]
ratio /= np.abs(ratio)
# replace nans and infs
wgts[k][~np.isfinite(ratio)] = 0.0
ratio[~np.isfinite(ratio)] = 0.0
# get delays
ydata[k], _ = utils.fft_dly(ratio, df, wgts=wgts[k], f0=f0, medfilt=medfilt, kernel=kernel, edge_cut=edge_cut)
# set nans to zero
ywgts[k] = np.nanmean(wgts[k], axis=1, keepdims=True)
isnan = np.isnan(ydata[k])
ydata[k][isnan] = 0.0
ywgts[k][isnan] = 0.0
# setup linsolve equations
eqns = {k: '' for k in keys}
for k in keys:
ap0, ap1 = split_pol(k[2])
for d in range((nDims, 2)[assume_2D]):
if len(eqns[k]) > 0:
eqns[k] += ' + '
if four_pol:
eqns[k] += f'T_{d}*r_{d}_{k[0]} - T_{d}*r_{d}_{k[1]}'
else:
eqns[k] += f'T_{d}_{ap0}*r_{d}_{k[0]} - T_{d}_{ap1}*r_{d}_{k[1]}'
# set design matrix entries
ls_design_matrix = {}
for a in antnums:
for d in range((nDims, 2)[assume_2D]):
ls_design_matrix[f'r_{d}_{a}'] = antpos[a][d]
# setup linsolve data dictionary
ls_data = {eqns[k]: ydata[k] for k in keys}
ls_wgts = {eqns[k]: ywgts[k] for k in keys}
# setup linsolve and run
sol = linsolve.LinearSolver(ls_data, wgts=ls_wgts, **ls_design_matrix)
echo("...running linsolve", verbose=verbose)
fit = sol.solve()
echo("...finished linsolve", verbose=verbose)
# time average
if time_avg:
Ntimes = list(fit.values())[0].shape[0]
for k in fit:
            fit[k] = np.repeat(np.median(fit[k], axis=0)[np.newaxis], Ntimes, axis=0)
if not return_gains:
# rename variables ew/ns instead of 0/1 to maintain backwards compatability
if assume_2D:
params = list(fit.keys())
for p in params:
if 'T_0' in p:
fit[p.replace('T_0', 'T_ew')] = fit[p]
del fit[p]
if 'T_1' in p:
fit[p.replace('T_1', 'T_ns')] = fit[p]
del fit[p]
return fit
else:
gains = {}
for ant in gain_ants:
# construct delays from delay slopes
if four_pol:
Taus = [fit[f'T_{d}'] for d in range((nDims, 2)[assume_2D])]
else:
Taus = [fit[f'T_{d}_{ant[1]}'] for d in range((nDims, 2)[assume_2D])]
delays = np.einsum('ijk,i->j', Taus, antpos[ant[0]][0:len(Taus)])
# construct gains from freqs and delays
freqs = f0 + np.arange(list(data.values())[0].shape[1]) * df
gains[ant] = np.exp(2.0j * np.pi * np.outer(delays, freqs))
return gains
def dft_phase_slope_solver(xs, ys, data, flags=None):
'''Solve for spatial phase slopes across an array by looking for the peak in the DFT.
This is analogous to the method in utils.fft_dly(), except its in 2D and does not
assume a regular grid for xs and ys.
Arguments:
xs: 1D array of x positions (e.g. of antennas or baselines)
ys: 1D array of y positions (must be same length as xs)
data: ndarray of complex numbers to fit with a phase slope. The first dimension must match
xs and ys, but subsequent dimensions will be preserved and solved independently.
Any np.nan in data is interpreted as a flag.
flags: optional array of flags of data not to include in the phase slope solver.
Returns:
slope_x, slope_y: phase slopes in units of radians/[xs] where the best fit phase slope plane
is np.exp(2.0j * np.pi * (xs * slope_x + ys * slope_y)). Both have the same shape as
the data after collapsing along the first dimension.
'''
# use the minimum and maximum difference between positions to define the search range and sampling in Fourier space
deltas = [((xi - xj)**2 + (yi - yj)**2)**.5 for i, (xi, yi) in enumerate(zip(xs, ys))
for (xj, yj) in zip(xs[i + 1:], ys[i + 1:])]
search_slice = slice(-1.0 / np.min(deltas), 1.0 / np.min(deltas), 1.0 / np.max(deltas))
# define cost function
def dft_abs(k, x, y, z):
return -np.abs(np.dot(z, np.exp(-2.0j * np.pi * (x * k[0] + y * k[1]))))
# set up flags, treating nans as flags
if flags is None:
flags = np.zeros_like(data, dtype=bool)
flags = flags | np.isnan(data)
# loop over data, minimizing the cost function
dflat = data.reshape((len(xs), -1))
fflat = flags.reshape((len(xs), -1))
slope_x = np.zeros_like(dflat[0, :].real)
slope_y = np.zeros_like(dflat[0, :].real)
for i in range(dflat.shape[1]):
if not np.all(np.isnan(dflat[:, i])):
dft_peak = brute(dft_abs, (search_slice, search_slice),
(xs[~fflat[:, i]], ys[~fflat[:, i]],
dflat[:, i][~fflat[:, i]]), finish=minimize)
slope_x[i] = dft_peak[0]
slope_y[i] = dft_peak[1]
return 2 * np.pi * slope_x.reshape(data.shape[1:]), 2 * np.pi * slope_y.reshape(data.shape[1:])
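# Minimal usage sketch for dft_phase_slope_solver above, assuming synthetic toy
# values; the _example_* helper name is hypothetical.
def _example_dft_phase_slope_solver():
    rng = np.random.default_rng(0)
    xs = rng.uniform(-30.0, 30.0, 50)  # assumed antenna x positions [m]
    ys = rng.uniform(-30.0, 30.0, 50)  # assumed antenna y positions [m]
    sx, sy = 0.01, -0.02  # assumed phase slopes [cycles / m]
    # build noiseless data with a known phase slope; trailing axis is preserved
    data = np.exp(2.0j * np.pi * (xs * sx + ys * sy))[:, None]
    slope_x, slope_y = dft_phase_slope_solver(xs, ys, data)
    # expect slope_x ~ 2*pi*sx and slope_y ~ 2*pi*sy (see the return statement above)
    return slope_x, slope_y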
def ndim_fft_phase_slope_solver(data, bl_vecs, assume_2D=True, zero_pad=2, bl_error_tol=1.0):
'''Find phase slopes across the array in the data. Similar to utils.fft_dly,
but can grid arbitrary bl_vecs in N dimensions (for example, when using
generalized antenna positions from redcal.reds_to_antpos in arrays with
extra degeneracies).
Parameters:
-----------
data : dictionary or DataContainer mapping keys to (complex) ndarrays.
All polarizations are treated equally and solved for together.
bl_vecs : dictionary mapping keys in data to vectors in N dimensions
assume_2D : if True, assume N == 2 and only use the first two dimensions of bl_vecs.
zero_pad : float factor by which to expand the grid onto which the data is binned.
Increases resolution in Fourier space at the cost of runtime/memory.
Must be >= 1.
bl_error_tol : float used to define non-zero elements of baseline vectors.
This helps set the fundamental resolution of the grid.
Output:
-------
phase_slopes : list of length N dimensions. Each element is the same shape
as each entry in data. Contains the phase gradients in units
of 1 / [bl_vecs].
'''
nDim = _count_nDims(bl_vecs, assume_2D=assume_2D)
if assume_2D:
nDim = 2
keys = sorted(list(bl_vecs.keys()))
# Figure out a grid and bins for the baseline vectors in each dimension
coords = []
all_bins = []
bl_vecs_array = np.array([bl_vecs[k] for k in keys])
assert zero_pad >= 1, f'zero_pad={zero_pad}, but it must be greater than or equal to 1.'
for d in range(nDim):
min_comp = np.min(bl_vecs_array[:, d])
max_comp = np.max(bl_vecs_array[:, d])
# pick minimum delta in this dimension inconsistent with 0 using bl_error_tol
dbl = np.min(np.abs(bl_vecs_array[:, d])[np.abs(bl_vecs_array[:, d]) >= bl_error_tol])
comp_range = max_comp - min_comp
bins = np.arange(min_comp - dbl - comp_range * (zero_pad - 1) / 2,
max_comp + 2 * dbl + comp_range * (zero_pad - 1) / 2, dbl)
all_bins.append(bins)
coords.append(np.digitize(bl_vecs_array[:, d], bins))
coords = np.array(coords).T
# create and fill grid with complex data
digitized = np.zeros(tuple([len(b) for b in all_bins]) + data[keys[0]].shape, dtype=complex)
for i, k in enumerate(keys):
digitized[tuple(coords[i])] = data[k]
digitized[~np.isfinite(digitized)] = 0
# FFT along first nDim dimensions
digitized_fft = np.fft.fftn(digitized, axes=tuple(range(nDim)))
# Condense the FFTed dimensions and find the max along them
new_shape = (np.prod(digitized_fft.shape[0:nDim]),) + data[keys[0]].shape
arg_maxes = digitized_fft.reshape(new_shape).argmax(0)
# Find the coordinates of the peaks in the FFT dimensions
peak_coords = np.unravel_index(arg_maxes, digitized_fft.shape[0:nDim])
# Convert coordinates to phase slopes using fft_freq
phase_slopes = []
for d in range(nDim):
fourier_modes = np.fft.fftfreq(len(all_bins[d]), np.median(np.diff(all_bins[d])))
phase_slopes.append(fourier_modes[peak_coords[d]] * 2 * np.pi)
return phase_slopes
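# Minimal usage sketch for ndim_fft_phase_slope_solver above, assuming toy
# baseline vectors and a known 2D phase gradient; the _example_* helper name
# is hypothetical and the recovered slopes are only approximate (grid-limited).
def _example_ndim_fft_phase_slope_solver():
    rng = np.random.default_rng(0)
    bl_vecs = {i: rng.uniform(-50.0, 50.0, 2) for i in range(100)}  # assumed layout [m]
    true_slopes = np.array([0.2, -0.1])  # assumed phase gradient [rad / m]
    data = {k: np.exp(1.0j * np.dot(true_slopes, v)) * np.ones((1, 1))
            for k, v in bl_vecs.items()}
    slopes = ndim_fft_phase_slope_solver(data, bl_vecs, assume_2D=True, zero_pad=4)
    return slopes  # two (1, 1) arrays approximating true_slopes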
def global_phase_slope_logcal(model, data, antpos, reds=None, solver='linfit', wgts=None, refant=None,
assume_2D=True, verbose=True, tol=1.0, edge_cut=0, time_avg=False,
zero_pad=2, return_gains=False, gain_ants=[]):
"""
Solve for a frequency-independent spatial phase slope using the equation
median_over_freq(angle(V_ij,xy^data / V_ij,xy^model)) = dot(Phi_x, r_i) - dot(Phi_y, r_j)
Parameters:
-----------
model : visibility data of reference model, type=DataContainer
keys are antenna-pair + polarization tuples, Ex. (1, 2, 'nn').
values are complex ndarray visibilities.
these must be 2D arrays, with [0] axis indexing time
and [1] axis indexing frequency.
data : visibility data of measurements, type=DataContainer
keys are antenna pair + pol tuples (must match model), values are
complex ndarray visibilities matching shape of model
antpos : type=dictionary, antpos dictionary. antenna num as key, position vector as value.
reds : list of list of redundant baselines. If left as None (default), will try to infer
reds from antpos, though if the antenna position dimensionality is > 3, this will fail.
solver : 'linfit' uses linsolve to fit phase slope across the array.
'dft' uses a spatial Fourier transform to find a phase slope, only works in 2D.
'ndim_fft' uses a gridded spatial Fourier transform instead, but works in ND.
wgts : weights of data, type=DataContainer, [default=None]
keys are antenna pair + pol tuples (must match model), values are real floats
matching shape of model and data. These are only used to find delays from
integrations that are unflagged for at least two frequency bins. In this case,
the delays are assumed to have equal weight, otherwise the delays take zero weight.
refant : antenna number integer to use as a reference,
The antenna position coordinates are centered at the reference, such that its phase
is identically zero across all frequencies. If None, use the first key in data as refant.
assume_2D : type=boolean, [default=True]
If this is true, all dimensions of antpos beyond the first two will be ignored.
If return_gains is False and assume_2D is False, then the returned variables will
look like Phi_0, Phi_1, Phi_2, etc. corresponding to the dimensions in antpos.
verbose : print output, type=boolean, [default=True]
tol : type=float, baseline match tolerance in units of baseline vectors (e.g. meters)
edge_cut : int, number of channels to exclude at each band edge in phase slope solver
time_avg : boolean, if True, replace resultant antenna phase slopes with the median across time
zero_pad : float factor by which to expand the grid onto which the data is binned. Only used
for ndim_fft mode. Must be >= 1.
return_gains : boolean. If True, convert result into a dictionary of gain waterfalls.
gain_ants : list of ant-pol tuples for return_gains dictionary
Output:
-------
if not return_gains:
fit : dictionary containing frequency-independent phase slope, e.g. Phi_ns_Jxx
for each position component and polarization in units of radians / [antpos].
If assume_2D is False, then these will be the more general Phi_0, Phi_1,
Phi_2, etc. corresponding to the dimensions in antpos.
else:
gains : dictionary with gain_ants as keys and gain waterfall arrays as values
"""
# check solver and edgecut
assert solver in PHASE_SLOPE_SOLVERS, f"Unrecognized solver {solver}"
echo(f"...configuring global_phase_slope_logcal for the {solver} algorithm", verbose=verbose)
assert 2 * edge_cut < list(data.values())[0].shape[1] - 1, "edge_cut cannot be >= (Nfreqs - 1) / 2"
# get keys from model and data dictionaries
keys = sorted(set(model.keys()) & set(data.keys()))
antnums = np.unique(list(antpos.keys()))
# make weights if None and make flags
if wgts is None:
wgts = odict()
for i, k in enumerate(keys):
wgts[k] = np.ones_like(data[k], dtype=float)
flags = DataContainer({k: ~wgts[k].astype(bool) for k in wgts})
# center antenna positions about the reference antenna
if refant is None:
refant = keys[0][0]
assert refant in antnums, "reference antenna {} not found in antenna list".format(refant)
antpos = odict(list(map(lambda k: (k, antpos[k] - antpos[refant]), antpos.keys())))
# count dimensions of antenna positions, figure out how many to solve for
nDims = _count_nDims(antpos, assume_2D=assume_2D)
# average data over baselines
if reds is None:
reds = redcal.get_pos_reds(antpos, bl_error_tol=tol)
ap = data.antpairs()
reds_here = []
for red in reds:
red_here = [bl[0:2] for bl in red if bl[0:2] in ap or bl[0:2][::-1] in ap] # if the reds have polarizations, ignore them
if len(red_here) > 0:
reds_here.append(red_here)
avg_data, avg_flags, _ = utils.red_average(data, reds=reds_here, flags=flags, inplace=False)
red_keys = list(avg_data.keys())
avg_wgts = DataContainer({k: (~avg_flags[k]).astype(float) for k in avg_flags})
avg_model, _, _ = utils.red_average(model, reds=reds_here, flags=flags, inplace=False)
ls_data, ls_wgts, bl_vecs, pols = {}, {}, {}, {}
for rk in red_keys:
# build equation string
eqn_str = ''
ap0, ap1 = split_pol(rk[2])
for d in range(nDims):
if len(eqn_str) > 0:
eqn_str += ' + '
eqn_str += f'{antpos[rk[0]][d]}*Phi_{d}_{ap0} - {antpos[rk[1]][d]}*Phi_{d}_{ap1}'
bl_vecs[eqn_str] = antpos[rk[0]] - antpos[rk[1]]
pols[eqn_str] = rk[2]
# calculate median of unflagged angle(data/model)
# ls_weights are sum of non-binary weights
dm_ratio = avg_data[rk] / avg_model[rk]
dm_ratio /= np.abs(dm_ratio) # This gives all channels roughly equal weight, moderating the effect of RFI (as in firstcal)
binary_flgs = np.isclose(avg_wgts[rk], 0.0) | np.isinf(dm_ratio) | np.isnan(dm_ratio)
avg_wgts[rk][binary_flgs] = 0.0
dm_ratio[binary_flgs] *= np.nan
if solver == 'linfit': # we want to fit the angles
ls_data[eqn_str] = np.nanmedian(np.angle(dm_ratio[:, edge_cut:(dm_ratio.shape[1] - edge_cut)]), axis=1, keepdims=True)
elif solver in ['dft', 'ndim_fft']: # we want the full complex number
ls_data[eqn_str] = np.nanmedian(dm_ratio[:, edge_cut:(dm_ratio.shape[1] - edge_cut)], axis=1, keepdims=True)
ls_wgts[eqn_str] = np.sum(avg_wgts[rk][:, edge_cut:(dm_ratio.shape[1] - edge_cut)], axis=1, keepdims=True)
# set unobserved data to 0 with 0 weight
ls_wgts[eqn_str][~np.isfinite(ls_data[eqn_str])] = 0
ls_data[eqn_str][~np.isfinite(ls_data[eqn_str])] = 0
if solver == 'linfit': # build linear system for phase slopes and solve with linsolve
# setup linsolve and run
solver = linsolve.LinearSolver(ls_data, wgts=ls_wgts)
echo("...running linsolve", verbose=verbose)
fit = solver.solve()
echo("...finished linsolve", verbose=verbose)
elif solver in ['dft', 'ndim_fft']: # look for a peak angle slope by FTing across the array
if not np.all([split_pol(pol)[0] == split_pol(pol)[1] for pol in data.pols()]):
raise NotImplementedError('DFT/FFT solving of global phase not implemented for abscal with cross-polarizations.')
for k in ls_data:
ls_data[k][ls_wgts[k] == 0] = np.nan
# solve one polarization at a time
fit = {}
for pol in data.pols():
eqkeys = [k for k in bl_vecs.keys() if pols[k] == pol]
# reformat data into arrays for dft_phase_slope_solver
if solver == 'dft':
assert assume_2D, 'dft solver only works when the array is 2D. Try using ndim_fft instead.'
blx = np.array([bl_vecs[k][0] for k in eqkeys])
bly = np.array([bl_vecs[k][1] for k in eqkeys])
data_array = np.array([ls_data[k] for k in eqkeys])
slope_x, slope_y = dft_phase_slope_solver(blx, bly, data_array)
fit['Phi_0_{}'.format(split_pol(pol)[0])] = slope_x
fit['Phi_1_{}'.format(split_pol(pol)[0])] = slope_y
# Perform ndim_fft solver
elif solver == 'ndim_fft':
slopes = ndim_fft_phase_slope_solver({k: ls_data[k] for k in eqkeys}, {k: bl_vecs[k] for k in eqkeys},
assume_2D=assume_2D, zero_pad=zero_pad, bl_error_tol=tol)
for d, slope in enumerate(slopes):
fit[f'Phi_{d}_{split_pol(pol)[0]}'] = slope
# time average
if time_avg:
Ntimes = list(fit.values())[0].shape[0]
for k in fit:
fit[k] = np.repeat(np.moveaxis(np.median(fit[k], axis=0)[np.newaxis], 0, 0), Ntimes, axis=0)
if not return_gains:
# rename variables ew/ns instead of 0/1 to maintain backwards compatibility
if assume_2D:
params = list(fit.keys())
for p in params:
if 'Phi_0' in p:
fit[p.replace('Phi_0', 'Phi_ew')] = fit[p]
del fit[p]
if 'Phi_1' in p:
fit[p.replace('Phi_1', 'Phi_ns')] = fit[p]
del fit[p]
return fit
else:
# compute gains, dotting each slope into the corresponding coordinate in that dimension
gains = {}
for ant in gain_ants:
Phis = [fit[f'Phi_{d}_{ant[1]}'] for d in range((nDims, 2)[assume_2D])]
gains[ant] = np.exp(1.0j * np.einsum('i,ijk,k->jk', antpos[ant[0]][0:len(Phis)],
Phis, np.ones(data[keys[0]].shape[1])))
return gains
def merge_gains(gains, merge_shared=True):
"""
Merge a list of gain (or flag) dictionaries.
If gains has boolean ndarray keys, interpret as flags
and merge with a logical OR.
Parameters:
-----------
gains : type=list or tuple, series of gain dictionaries with (ant, pol) keys
and complex ndarrays as values (or boolean ndarrays if flags)
merge_shared : type=bool, If True merge only shared keys, eliminating the others.
Otherwise, merge all keys.
Output:
-------
merged_gains : type=dictionary, merged gain (or flag) dictionary with same key-value
structure as input dict.
"""
# get shared keys
if merge_shared:
keys = sorted(set(reduce(operator.and_, [set(g.keys()) for g in gains])))
else:
keys = sorted(set(reduce(operator.add, [list(g.keys()) for g in gains])))
# form merged_gains dict
merged_gains = odict()
# determine if gains or flags from first entry in gains
fedflags = False
if gains[0][list(gains[0].keys())[0]].dtype == np.bool_:
fedflags = True
# iterate over keys
for i, k in enumerate(keys):
if fedflags:
merged_gains[k] = reduce(operator.add, [g.get(k, True) for g in gains])
else:
merged_gains[k] = reduce(operator.mul, [g.get(k, 1.0) for g in gains])
return merged_gains
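# Minimal usage sketch for merge_gains above, assuming toy gain dictionaries
# with (ant, pol) keys; the _example_* helper name is hypothetical.
def _example_merge_gains():
    g1 = {(0, 'Jxx'): np.full((2, 3), 2.0 + 0.0j)}
    g2 = {(0, 'Jxx'): np.full((2, 3), 0.5 + 0.0j),
          (1, 'Jxx'): np.ones((2, 3), dtype=complex)}
    # with merge_shared=True (the default) only the shared key (0, 'Jxx') survives
    merged = merge_gains([g1, g2])
    return merged  # values are elementwise products, here all ones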
def data_key_to_array_axis(data, key_index, array_index=-1, avg_dict=None):
"""
move an index of data.keys() into the data axes
Parameters:
-----------
data : type=DataContainer, complex visibility data with
antenna-pair + pol tuples for keys, in DataContainer dictionary format.
key_index : integer, index of keys to consolidate into data arrays
array_index : integer, which axes of data arrays to append to
avg_dict : DataContainer, a dictionary with same keys as data
that will have its data arrays averaged along key_index
Result:
-------
new_data : DataContainer, complex visibility data
with key_index of keys moved into the data arrays
new_avg_dict : copy of avg_dict. Only returned if avg_dict is not None.
popped_keys : unique list of keys moved into data array axis
"""
# instantiate new data object
new_data = odict()
new_avg = odict()
# get keys
keys = list(data.keys())
# sort keys across key_index
key_sort = np.argsort(np.array(keys, dtype=object)[:, key_index])
keys = list(map(lambda i: keys[i], key_sort))
popped_keys = np.unique(np.array(keys, dtype=object)[:, key_index])
# get new keys
new_keys = list(map(lambda k: k[:key_index] + k[key_index + 1:], keys))
new_unique_keys = []
# iterate over new_keys
for i, nk in enumerate(new_keys):
# check for unique keys
if nk in new_unique_keys:
continue
new_unique_keys.append(nk)
# get all instances of redundant keys
ravel = list(map(lambda k: k == nk, new_keys))
# iterate over redundant keys and consolidate into new arrays
arr = []
avg_arr = []
for j, b in enumerate(ravel):
if b:
arr.append(data[keys[j]])
if avg_dict is not None:
avg_arr.append(avg_dict[keys[j]])
# assign to new_data
new_data[nk] = np.moveaxis(arr, 0, array_index)
if avg_dict is not None:
new_avg[nk] = np.nanmean(avg_arr, axis=0)
if avg_dict is not None:
return new_data, new_avg, popped_keys
else:
return new_data, popped_keys
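# Minimal usage sketch for data_key_to_array_axis above, assuming toy data;
# the _example_* helper name is hypothetical.
def _example_data_key_to_array_axis():
    data = {(0, 1, 'xx'): np.ones((4, 8)), (0, 1, 'yy'): 2.0 * np.ones((4, 8))}
    # fold the polarization entry of each key into a new trailing array axis
    new_data, popped_keys = data_key_to_array_axis(data, key_index=2)
    # new_data[(0, 1)] has shape (4, 8, 2); popped_keys is ['xx', 'yy']
    return new_data, popped_keys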
def array_axis_to_data_key(data, array_index, array_keys, key_index=-1, copy_dict=None):
"""
move an axes of data arrays in data out of arrays
and into a unique key index in data.keys()
Parameters:
-----------
data : DataContainer, complex visibility data with
antenna-pair (+ pol + other) tuples for keys
array_index : integer, which axes of data arrays
to extract from arrays and move into keys
array_keys : list, list of new key from array elements. must have length
equal to length of data_array along axis array_index
key_index : integer, index within the new set of keys to insert array_keys
copy_dict : DataContainer, a dictionary with same keys as data
that will have its data arrays copied along array_keys
Output:
-------
new_data : DataContainer, complex visibility data
with array_index of data arrays extracted and moved
into a unique set of keys
new_copy : DataContainer, copy of copy_dict
with array_index of data arrays copied to unique keys
"""
# instantiate new object
new_data = odict()
new_copy = odict()
# get keys
keys = sorted(data.keys())
new_keys = []
# iterate over keys
for i, k in enumerate(keys):
# iterate over every new array key
for j, ak in enumerate(array_keys):
new_key = list(k)
if key_index == -1:
new_key.insert(len(new_key), ak)
else:
new_key.insert(key_index, ak)
new_key = tuple(new_key)
new_data[new_key] = np.take(data[k], j, axis=array_index)
if copy_dict is not None:
new_copy[new_key] = copy.copy(copy_dict[k])
if copy_dict is not None:
return new_data, new_copy
else:
return new_data
def wiener(data, window=(5, 11), noise=None, medfilt=True, medfilt_kernel=(3, 9), array=False):
"""
Wiener-filter complex visibility data. This might be used in constructing a
model reference. See scipy.signal.wiener for details on the method.
Parameters:
-----------
data : type=DataContainer, a DataContainer dictionary holding complex visibility data,
unless array is True
window : type=tuple, wiener-filter window along each axis of data
noise : type=float, estimate of noise. if None will estimate itself
medfilt : type=bool, if True, median filter data before wiener filtering
medfilt_kernel : type=tuple, median filter kernel along each axis of data
array : type=boolean, if True, feeding a single ndarray, rather than a dictionary
Output: (new_data)
-------
new_data : type=DataContainer, DataContainer dictionary holding new visibility data
"""
# check if data is an array
if array:
data = {'arr': data}
new_data = odict()
for i, k in enumerate(list(data.keys())):
real = np.real(data[k])
imag = np.imag(data[k])
if medfilt:
real = signal.medfilt(real, kernel_size=medfilt_kernel)
imag = signal.medfilt(imag, kernel_size=medfilt_kernel)
new_data[k] = signal.wiener(real, mysize=window, noise=noise) + \
1j * signal.wiener(imag, mysize=window, noise=noise)
if array:
return new_data['arr']
else:
return DataContainer(new_data)
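# Minimal usage sketch for wiener above, assuming a toy noise level; the
# _example_* helper name is hypothetical. array=True skips DataContainer wrapping.
def _example_wiener():
    rng = np.random.default_rng(0)
    noise = 0.1 * (rng.standard_normal((16, 32)) + 1j * rng.standard_normal((16, 32)))
    noisy = np.ones((16, 32), dtype=complex) + noise
    return wiener(noisy, window=(5, 11), medfilt=True, array=True)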
def interp2d_vis(model, model_lsts, model_freqs, data_lsts, data_freqs, flags=None,
kind='cubic', flag_extrapolate=True, medfilt_flagged=True, medfilt_window=(3, 7),
fill_value=None):
"""
Interpolate complex visibility model onto the time & frequency basis of
a data visibility. See below for notes on flag propagation if flags is provided.
Parameters:
-----------
model : type=DataContainer, holds complex visibility for model
keys are antenna-pair + pol tuples, values are 2d complex visibility
with shape (Ntimes, Nfreqs).
model_lsts : 1D array of the model time axis, dtype=float, shape=(Ntimes,)
model_freqs : 1D array of the model freq axis, dtype=float, shape=(Nfreqs,)
data_lsts : 1D array of the data time axis, dtype=float, shape=(Ntimes,)
data_freqs : 1D array of the data freq axis, dtype=float, shape=(Nfreqs,)
flags : type=DataContainer, dictionary containing model flags. Can also contain model wgts
as floats and will convert to booleans appropriately.
kind : type=str, kind of interpolation, options=['linear', 'cubic', 'quintic']
medfilt_flagged : type=bool, if True, before interpolation, replace flagged pixels with output from
a median filter centered on each flagged pixel.
medfilt_window : type=tuple, extent of window for median filter across the (time, freq) axes.
Even numbers are rounded down to an odd number.
flag_extrapolate : type=bool, flag extrapolated data_lsts if True.
fill_value : type=float, if fill_value is None, extrapolated points are extrapolated
else they are filled with fill_value.
Output: (new_model, new_flags)
-------
new_model : interpolated model, type=DataContainer
new_flags : flags associated with interpolated model, type=DataContainer
Notes:
------
If the data has flagged pixels, it is recommended to turn medfilt_flagged to True. This runs a median
filter on the flagged pixels and replaces their values with the results, but they remain flagged.
This happens *before* interpolation. This means that interpolated values near flagged
pixels aren't significantly biased by their presence.
In general, if flags are fed, flags are propagated if a flagged pixel is a nearest neighbor
of an interpolated pixel.
"""
# make flags
new_model = odict()
new_flags = odict()
# get nearest neighbor points
freq_nn = np.array(list(map(lambda x: np.argmin(np.abs(model_freqs - x)), data_freqs)))
time_nn = np.array(list(map(lambda x: np.argmin(np.abs(model_lsts - x)), data_lsts)))
freq_nn, time_nn = np.meshgrid(freq_nn, time_nn)
# get model indices meshgrid
mod_F, mod_L = np.meshgrid(np.arange(len(model_freqs)), np.arange(len(model_lsts)))
# raise warning on flags
if flags is not None and medfilt_flagged is False:
print("Warning: flags are fed, but medfilt_flagged=False. \n"
"This may cause weird behavior of interpolated points near flagged data.")
# ensure flags are booleans
if flags is not None:
if np.issubdtype(flags[list(flags.keys())[0]].dtype, np.floating):
flags = DataContainer(odict(list(map(lambda k: (k, ~flags[k].astype(bool)), flags.keys()))))
# loop over keys
for i, k in enumerate(list(model.keys())):
# get model array
m = model[k]
# get real and imag separately
real = np.real(m)
imag = np.imag(m)
# median filter flagged data if desired
if medfilt_flagged and flags is not None:
# get extent of window along freq and time
f_ext = int((medfilt_window[1] - 1) / 2.)
t_ext = int((medfilt_window[0] - 1) / 2.)
# set flagged data to nan
real[flags[k]] *= np.nan
imag[flags[k]] *= np.nan
# get flagged indices
f_indices = mod_F[flags[k]]
l_indices = mod_L[flags[k]]
# construct fill arrays
real_fill = np.empty(len(f_indices), float)
imag_fill = np.empty(len(f_indices), float)
# iterate over flagged data and replace w/ medfilt
for j, (find, tind) in enumerate(zip(f_indices, l_indices)):
tlow, thi = tind - t_ext, tind + t_ext + 1
flow, fhi = find - f_ext, find + f_ext + 1
ll = 0
while True:
# iterate until window has non-flagged data in it
# with a max of 10 iterations
if tlow < 0:
tlow = 0
if flow < 0:
flow = 0
r_med = np.nanmedian(real[tlow:thi, flow:fhi])
i_med = np.nanmedian(imag[tlow:thi, flow:fhi])
import os, sys
import numpy as np
from matplotlib import pyplot as plt
from bpe import BPE
from scipy import stats
SEQ_SIZE = 24
TITLE_EMBED_SIZE = 36
TOKEN_EMBED_SIZE = 200
USE_GRU = True
USE_CATS = False
USE_AUTOENC = False
NUM_EPOCHS = 100
BATCH_SIZE = 200
LR = 0.001
DO_RATE = 0.5
BN = 0.99
SAVE_DIR = 'model_cats'
DATA_DIR = 'training_data'
NUM_RAND_GEN = 10
#Create directory to save model
if not os.path.exists(SAVE_DIR):
os.makedirs(SAVE_DIR)
#Load bpe
print('Loading BPE...')
bpe = BPE()
bpe.load(DATA_DIR + '/words800.bpe')
end_token = bpe.str_to_token['\n']
bpe_size = len(bpe.str_to_token)
print('Loaded ' + str(bpe_size) + ' bpe tokens.')
#Load the categories
print('Loading Categories...')
all_categories = {}
with open(DATA_DIR + '/categories.txt', 'r') as fin:
for line in fin:
all_categories[line[:-1]] = len(all_categories)
num_categories = len(all_categories)
if USE_CATS:
TITLE_EMBED_SIZE = num_categories
print('Loaded ' + str(num_categories) + ' categories')
#Create training samples
try:
print('Loading Titles...')
i_train = np.load(DATA_DIR + '/i_train.npy')
c_train = np.load(DATA_DIR + '/c_train.npy')
x_train = np.load(DATA_DIR + '/x_train.npy')
x1_train = np.load(DATA_DIR + '/x1_train.npy')
y_train = np.load(DATA_DIR + '/y_train.npy')
if x_train.shape[1] != SEQ_SIZE:
raise  # cached sequence length is stale; rebuild in the except branch below
except:
print('Encoding Titles...')
i_train = []
c_train = []
x_train = []
x1_train = []
y_train = []
with open(DATA_DIR + '/titles_cats.txt', 'r') as fin:
num_titles = 0
for line in fin:
title, category = line[:-1].lower().split('"')
title = title + '\n'
if category == '': category = 'other'
c_vec = np.zeros((num_categories,), dtype=np.float32)
c_vec[all_categories[category]] = 1.0
encoded = np.array(bpe.encode(title), dtype=np.int32)
seq_len = encoded.shape[0]
first_len = min(SEQ_SIZE, seq_len) - 1
x = np.full((SEQ_SIZE,), end_token)
y = np.full((SEQ_SIZE,), end_token)
x[1:1+first_len] = encoded[:first_len]
y[:1+first_len] = encoded[:1+first_len]
x1 = np.copy(x)
i_train.append(num_titles)
c_train.append(c_vec)
x_train.append(x)
x1_train.append(x1)
y_train.append(y)
if seq_len > SEQ_SIZE:
for i in range(seq_len - SEQ_SIZE):
x = encoded[i:i+SEQ_SIZE]
y = encoded[i+1:i+SEQ_SIZE+1]
i_train.append(num_titles)
c_train.append(c_vec)
x_train.append(x)
x1_train.append(x1)
y_train.append(y)
num_titles += 1
if num_titles % 1000 == 0:
print(' ' + str(num_titles))
i_train = np.array(i_train, dtype=np.int32)
i_train = np.expand_dims(i_train, axis=1)
c_train = np.array(c_train, dtype=np.int32)
x_train = np.array(x_train, dtype=np.int32)
x1_train = np.array(x1_train, dtype=np.int32)
# -*- Copyright (c) 2020, <NAME>, All rights reserved. -*-
"""
NAME:
Photometry Retrieve Source
PURPOSE:
Given a swift field, find the Zaritsky sources in them and do an initial photometric guess.
get_meta() is a class: give it an RA/Dec and it returns the SWIFT field
with the objects in it. It includes the coordinates, regions, catalog information,
and initial guesses for flux. The entire object can be saved with pickle.
Notes:
"""
import astropy.units as u
import numpy as np
import pandas as pd
from astropy.io import fits
from astropy.wcs import WCS
from astroquery.skyview import SkyView
from astropy.coordinates import SkyCoord, Angle
from photutils import aperture_photometry, SkyCircularAperture
from photutils.aperture import CircularAperture
################
# SWIFT Class #
################
# Inputs:
# hdu:
# Name of the file itself, opened with fits
# Umag_cutoff:
# First cutoff, how bright do sources need to be in Umag to be considered
# Bmag_cutoff:
# Second cutoff: before removing sources with no Umag, check if Bmag is present and above a threshold
# fits_origin:
# Most files start at 0, some start at 1.
# aperture_size:
# How big to make the photometric aperture (the circle around the source)
# xdim:
# Pixel coordinates for the xrange you want to run retrieve source functions on
# ydim:
# Pixel coordinates for the yrange you want to run retrieve source functions on
# optical_catalog:
# For speed, pick an optical catalog based on what galaxy you are in.
# Returns:
# A class object with
# - general information about the file (header,exposure_time,cdelt,wcs,filter)
# - The patch of sky or data you are running on.
# - An optical catalog of the sources in that patch
# - UV photometry based on optical astrometry, meant to be a first guess
# - Some detector positions, flags for if region is off the image.
class get_meta():
# Callable Functions
#############################
def with_hdu(self,hdu,usno_catalog,
optical_catalog,
Umag_cutoff=np.nan,
Bmag_cutoff=np.nan,
fits_origin=0,
aperture_size=2.5*2,
xdim=[0,300],
ydim=[0,300],
save_dropped_catalog=True):
# Get general information from the header
self.header = hdu.header
self.exposure_time = self.header["EXPOSURE"]
self.cdelt = np.abs(self.header["CD1_1"])
self.wcs= WCS(self.header)
self.filter = self.header["FILTER"]
# Get data based on patch of sky defined in xdim ydim and divide by exposure time to get count rate
self.data = hdu.data[ydim[0]:ydim[1],xdim[0]:xdim[1]] / self.exposure_time
# Get optical sources for that patch of UV sky
self.optical_catalog_fname = optical_catalog
self.catalog = self.get_catalog_sources(Umag_cutoff,Bmag_cutoff,fits_origin,xdim,ydim)
self.catalog['KEY'] = np.arange(self.catalog.shape[0])
self.pixel_positions = self.get_positions(hdu,self.catalog,fits_origin)
self.pixel_positions[0] = self.pixel_positions[0] - xdim[0]
self.pixel_positions[1] = self.pixel_positions[1] - ydim[0]
self.ra = self.catalog.Ra
self.dec = self.catalog.Dec
# Do some initial photometry to get an initial guess
self.source_intensities = np.array(self.get_intensities(self.catalog,aperture_size))
# Get position with some correction from the detector
self.detector_positions = self.get_det_positions(hdu,self.catalog,fits_origin)
self.outside,self.edge=self.get_edge(self.detector_positions)
# Masking USNO
self.masked_byusno_data,self.usno_drop_cat = self.remove_usno_bright_objects(self.data,usno_catalog,xdim,ydim,threshhold = 65)
# Masking MCPS
self.masked_data,self.mcps_drop_cat = self.mask_mcps(self.masked_byusno_data,self.pixel_positions[0],self.pixel_positions[1],self.source_intensities,self.catalog)
# Combine dropped catalogs
self.drop_cat = self.combine_drop_cat(self.mcps_drop_cat,self.usno_drop_cat,save_dropped_catalog)
return self
# Dependent Functions
#####################
# Open Optical Catalog and Reduce Number of Sources by Some Cutoff.
def get_optical_catalog(self,Umag_cutoff,Bmag_cutoff):
# Read in the optical catalog
labels = ["Ra","Dec","Umag","e_Umag",
"Bmag","e_Bmag","Vmag","e_Vmag","Imag","e_Imag",
"Flag","Jmag","e_Jmag","Hmag","e_Hmag","Ksmag","e_Ksmag"]
optical_catalog = pd.read_csv(self.optical_catalog_fname,sep="\s+",names=labels)
# For optical catalog: convert RA from hours to degrees
optical_catalog.Ra = optical_catalog.Ra * 15
if np.isfinite(Umag_cutoff):
#!/usr/bin/env python
"""Tests for the linalg.isolve.gcrotmk module
"""
from numpy.testing import (assert_, assert_allclose, assert_equal,
suppress_warnings)
import numpy as np
from numpy import zeros, array, allclose
from scipy.linalg import norm
from scipy.sparse import csr_matrix, eye, rand
from scipy.sparse.linalg.interface import LinearOperator
from scipy.sparse.linalg import splu
from scipy.sparse.linalg.isolve import gcrotmk, gmres
Am = csr_matrix(array([[-2,1,0,0,0,9],
[1,-2,1,0,5,0],
[0,1,-2,1,0,0],
[0,0,1,-2,1,0],
[0,3,0,1,-2,1],
[1,0,0,0,1,-2]]))
b = array([1,2,3,4,5,6])
count = [0]
def matvec(v):
count[0] += 1
return Am*v
A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)
def do_solve(**kw):
count[0] = 0
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x0, flag = gcrotmk(A, b, x0=zeros(A.shape[0]), tol=1e-14, **kw)
count_0 = count[0]
assert_(allclose(A*x0, b, rtol=1e-12, atol=1e-12), norm(A*x0-b))
return x0, count_0
class TestGCROTMK(object):
def test_preconditioner(self):
# Check that preconditioning works
pc = splu(Am.tocsc())
M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype)
x0, count_0 = do_solve()
x1, count_1 = do_solve(M=M)
assert_equal(count_1, 3)
assert_(count_1 < count_0/2)
assert_(allclose(x1, x0, rtol=1e-14))
def test_arnoldi(self):
np.random.seed(1)
A = eye(2000) + rand(2000, 2000, density=5e-4)
b = np.random.rand(2000)
# The inner arnoldi should be equivalent to gmres
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x0, flag0 = gcrotmk(A, b, x0=zeros(A.shape[0]), m=15, k=0, maxiter=1)
x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]), restart=15, maxiter=1)
assert_equal(flag0, 1)
assert_equal(flag1, 1)
assert np.linalg.norm(A.dot(x0) - b) > 1e-3
assert_allclose(x0, x1)
def test_cornercase(self):
np.random.seed(1234)
# Rounding error may prevent convergence with tol=0 --- ensure
# that the return values in this case are correct, and no
# exceptions are raised
for n in [3, 5, 10, 100]:
A = 2*eye(n)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
b = np.ones(n)
x, info = gcrotmk(A, b, maxiter=10)
assert_equal(info, 0)
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
x, info = gcrotmk(A, b, tol=0, maxiter=10)
if info == 0:
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
b = np.random.rand(n)
x, info = gcrotmk(A, b, maxiter=10)
assert_equal(info, 0)
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
x, info = gcrotmk(A, b, tol=0, maxiter=10)
if info == 0:
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
def test_nans(self):
A = eye(3, format='lil')
A[1,1] = np.nan
b = np.ones(3)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x, info = gcrotmk(A, b, tol=0, maxiter=10)
assert_equal(info, 1)
def test_truncate(self):
np.random.seed(1234)
A = np.random.rand(30, 30) + np.eye(30)
b = np.random.rand(30)
import os
import cv2
import sys
import json
import numpy as np
import matplotlib.pyplot as plt
from scenedetect import VideoManager
from scenedetect import SceneManager
from scenedetect.detectors import ContentDetector
def find_scenes(video_path, threshold=30.0):
video_manager = VideoManager([video_path])
scene_manager = SceneManager()
scene_manager.add_detector(ContentDetector(threshold=threshold))
video_manager.set_downscale_factor()
video_manager.start()
scene_manager.detect_scenes(frame_source=video_manager)
return scene_manager.get_scene_list()
def optical_flow_extract(frame1, frame2):
prev = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prev, next, None, 0.5, 5, 13, 10, 5, 1.1, 0)
flow_x = np.abs(flow[..., 0])
flow_y = np.abs(flow[..., 1])
mag = np.sqrt(np.add(np.power(flow_x, 2), np.power(flow_y, 2)))
mag = np.abs(np.reshape(mag, (1, -1)))
OF = np.sum(mag, dtype=np.float64)
return OF
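# Minimal usage sketch for optical_flow_extract above, assuming two synthetic
# BGR frames; the _example_* helper name is hypothetical. The second frame is a
# shifted copy of the first, so the summed flow magnitude is clearly non-zero.
def _example_optical_flow_extract():
    rng = np.random.default_rng(0)
    frame1 = (rng.random((64, 64, 3)) * 255).astype(np.uint8)
    frame2 = np.roll(frame1, 2, axis=1)  # shift 2 pixels to the right
    return optical_flow_extract(frame1, frame2)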
def show(data):
x = [i for i in range(len(data))]
y = data
plt.bar(x, y)
plt.title("gaussian optical flow")
plt.xlabel("frame sequence")
return plt
def featrue_extract(input_file, start, end):
print(input_file)
cap = cv2.VideoCapture(input_file)
flow_size = int(end - start)
OF = []
cap.set(cv2.CAP_PROP_POS_FRAMES, start)
for i in range(start, end):
cap.set(cv2.CAP_PROP_POS_FRAMES, i)
ret1, frame1 = cap.read()
ret2, frame2 = cap.read()
flow = optical_flow_extract(frame1, frame2)
OF.append(flow)
cap.release()
variation_factor = []
for i in range(len(OF)):
x = 0
if i == 0:
x = (2 * OF[i]) / (OF[i + 1] + OF[i + 2])
elif i > 0 and i < len(OF) - 1:
x = (2 * OF[i]) / (OF[i - 1] + OF[i + 1])
elif i == len(OF) - 1:
x = (2 * OF[i]) / (OF[i - 1] + OF[i - 2])
variation_factor.append(x)
return variation_factor
def z_gaussian_model(variation_factor):
temp = np.array(variation_factor, dtype=np.float64)
mean = np.mean(temp)
var = np.var(temp)
z = np.abs(temp - mean)
# Unittests:
import pytest
import numpy as np
from ie_pandas import DataFrame
def test_sum_data_frame_list():
x = {
"a": [9, 2, 5, 8],
"b": [True, False, False, True],
"c": ["rasds", "sdsd", "cds", "sd"],
"d": [1.4, 1.5, 3.6, 1.1, 1.3],
}
df_cl = DataFrame(x)
expected_output_sum = [np.sum(x["a"]), np.sum(x["d"])]
output_sum = df_cl.sum()
assert output_sum == expected_output_sum
def test_sum_data_frame_array():
y = {
"a": np.array([9, 2, 5, 8]),
"b": np.array([True, False, False, True]),
"c": np.array(["rasds", "sdsd", "cds", "sd"]),
"d": np.array([1.4, 1.5, 3.6, 1.1, 1.3]),
}
df_cl_y = DataFrame(y)
expected_output_sum = [np.sum(y["a"]), np.sum(y["d"])]
import logging
import time
from collections import defaultdict
from typing import Callable, Tuple, List, Dict, Any, Optional, Union
import cv2
import numpy as np
import opensfm.synthetic_data.synthetic_dataset as sd
import scipy.signal as signal
import scipy.spatial as spatial
from opensfm import (
geo,
pygeometry,
reconstruction as rc,
types,
pymap,
features as oft,
)
logger = logging.getLogger(__name__)
def derivative(func: Callable, x: np.ndarray) -> np.ndarray:
eps = 1e-10
d = (func(x + eps) - func(x)) / eps
d /= np.linalg.norm(d)
return d
def samples_generator_random_count(count: int) -> np.ndarray:
return np.random.rand(count)
def samples_generator_interval(
start: float, length: float, interval: float, interval_noise: float
) -> np.ndarray:
samples = np.linspace(start / length, 1, num=int(length / interval))
samples += np.random.normal(
0.0, float(interval_noise) / float(length), samples.shape
)
return samples
def generate_samples_and_local_frame(
samples: np.ndarray, shape: Callable
) -> Tuple[np.ndarray, np.ndarray]:
points = []
tangents = []
for i in samples:
point = shape(i)
points += [point]
ex = derivative(shape, i)
ez = np.array([ex[1], -ex[0]])
tangents += [np.array([ez, ex])]
return np.array(points), np.array(tangents)
def generate_samples_shifted(
samples: np.ndarray, shape: Callable, shift: float
) -> np.ndarray:
plane_points = []
for i in samples:
point = shape(i)
tangent = derivative(shape, i)
tangent = np.array([-tangent[1], tangent[0]])
point += tangent * (shift / 2)
plane_points += [point]
return np.array(plane_points)
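# Minimal usage sketch for generate_samples_shifted above, assuming a unit-circle
# shape function; the _example_* helper name is hypothetical.
def _example_generate_samples_shifted():
    def circle(t):
        return np.array([np.cos(2.0 * np.pi * t), np.sin(2.0 * np.pi * t)])
    samples = np.linspace(0.0, 1.0, 10)
    # each sample point is displaced along the local normal by shift / 2 = 0.1
    return generate_samples_shifted(samples, circle, shift=0.2)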
# ===========================================
#
# mian Analysis Data Mining/ML Library
# @author: tbj128
#
# ===========================================
#
# Imports
#
import numpy as np
import pandas as pd
from scipy import interp
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.metrics import roc_curve, roc_auc_score
from sklearn.model_selection import train_test_split, StratifiedKFold
import random
from mian.model.otu_table import OTUTable
class RandomForest(object):
def run(self, user_request):
table = OTUTable(user_request.user_id, user_request.pid)
otu_table, headers, sample_labels = table.get_table_after_filtering_and_aggregation_and_low_count_exclusion(user_request)
metadata_vals = table.get_sample_metadata().get_metadata_column_table_order(sample_labels, user_request.catvar)
return self.analyse(user_request, otu_table, headers, metadata_vals)
def analyse(self, user_request, otu_table, headers, metadata_vals):
cross_validate_set = user_request.get_custom_attr("crossValidate")
cross_validate_folds = int(user_request.get_custom_attr("crossValidateFolds"))
fix_training = user_request.get_custom_attr("fixTraining")
training_proportion = user_request.get_custom_attr("trainingProportion")
seed = int(user_request.get_custom_attr("seed")) if user_request.get_custom_attr("seed") != "" else random.randint(0, 100000)
num_trees = int(user_request.get_custom_attr("numTrees"))
max_depth = int(user_request.get_custom_attr("maxDepth")) if user_request.get_custom_attr("maxDepth") != "" else None
if int(user_request.level) == -1:
# OTU tables are returned as a CSR matrix
X = pd.DataFrame.sparse.from_spmatrix(otu_table, columns=headers, index=range(otu_table.shape[0]))
else:
X = pd.DataFrame(otu_table, columns=headers, index=range(otu_table.shape[0]))
Y = np.array(metadata_vals)
uniq_metadata_vals = list(set(Y))
# https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0188475
def binarize(classifier, Y_cv):
actual_class_to_index = {}
for i in range(len(classifier.classes_)):
actual_class_to_index[classifier.classes_[i]] = i
Y_cv_binarize = []
for y_val in Y_cv:
binarized = [0] * len(classifier.classes_)
binarized[actual_class_to_index[y_val]] = 1
Y_cv_binarize.append(binarized)
return np.array(Y_cv_binarize)
def performCrossValidationForAUC(X_cv, metadata_vals_cv, Y_cv):
cv = StratifiedKFold(n_splits=cross_validate_folds, shuffle=True, random_state=seed)
classifier = RandomForestClassifier(n_estimators=num_trees, max_depth=max_depth, oob_score=True)
base_fpr = np.linspace(0, 1, 51)
""" Test normalizing flow architectures. """
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import epi.batch_norm
from epi.normalizing_flows import NormalizingFlow, IntervalFlow
from pytest import raises
EPS = 1e-6
def test_NormalizingFlow_init():
"""Test architecture initialization."""
arch_type = "coupling"
D = 4
num_stages = 1
num_layers = 2
num_units = 15
tf.random.set_seed(0)
np.random.seed(0)
# Check setters.
nf = NormalizingFlow(arch_type, D, num_stages, num_layers, num_units)
assert nf.arch_type == "coupling"
assert nf.D == D
assert nf.num_stages == num_stages
assert nf.num_layers == num_layers
assert nf.num_units == num_units
assert nf.batch_norm
assert nf.post_affine
assert nf.lb is None
assert nf.ub is None
assert nf.random_seed == 1
# Test autoregressive
nf = NormalizingFlow("autoregressive", D, num_stages, num_layers, num_units)
assert nf.arch_type == "autoregressive"
lb = -2.0 * np.ones((D,))
ub = 2.0 * np.ones((D,))
bounds = (lb, ub)
nf = NormalizingFlow(
arch_type,
D,
num_stages,
num_layers,
num_units,
"affine",
32,
False,
None,
False,
bounds,
5,
)
assert not nf.batch_norm
assert not nf.post_affine
assert np.equal(nf.lb, lb).all()
assert np.equal(nf.ub, ub).all()
assert nf.random_seed == 5
nf = NormalizingFlow(
arch_type,
D,
num_stages,
num_layers,
num_units,
"affine",
32,
False,
None,
False,
[lb, ub],
5,
)
assert np.equal(nf.lb, lb).all()
assert np.equal(nf.ub, ub).all()
# Test error handling.
with raises(TypeError):
nf = NormalizingFlow(0, D, num_stages, num_layers, num_units)
with raises(ValueError):
nf = NormalizingFlow("foo", D, num_stages, num_layers, num_units)
with raises(TypeError):
nf = NormalizingFlow(arch_type, 2.0, num_stages, num_layers, num_units)
with raises(ValueError):
nf = NormalizingFlow(arch_type, 1, num_stages, num_layers, num_units)
with raises(TypeError):
nf = NormalizingFlow(arch_type, D, 2.0, num_layers, num_units)
with raises(ValueError):
nf = NormalizingFlow(arch_type, D, -1, num_layers, num_units)
with raises(TypeError):
nf = NormalizingFlow(arch_type, D, num_stages, 2.0, num_units)
with raises(ValueError):
nf = NormalizingFlow(arch_type, D, num_stages, 0, num_units)
with raises(TypeError):
nf = NormalizingFlow(arch_type, D, num_stages, num_layers, 2.0)
with raises(ValueError):
nf = NormalizingFlow(arch_type, D, num_stages, num_layers, 0)
with raises(TypeError):
nf = NormalizingFlow(arch_type, D, num_stages, num_layers, 2.0)
with raises(ValueError):
nf = NormalizingFlow(arch_type, D, num_stages, num_layers, 0)
with raises(TypeError):
nf = NormalizingFlow(
arch_type, D, num_stages, num_layers, num_units, batch_norm=1.0
)
with raises(TypeError):
nf = NormalizingFlow(
arch_type,
D,
num_stages,
num_layers,
num_units,
batch_norm=True,
bn_momentum="foo",
)
with raises(TypeError):
nf = NormalizingFlow(
arch_type, D, num_stages, num_layers, num_units, post_affine="foo",
)
with raises(ValueError):
nf = NormalizingFlow(
arch_type, D, num_stages, num_layers, num_units, bounds=(lb, ub, ub)
)
with raises(TypeError):
nf = NormalizingFlow(
arch_type, D, num_stages, num_layers, num_units, bounds=("foo", "bar")
)
with raises(TypeError):
nf = NormalizingFlow(
arch_type, D, num_stages, num_layers, num_units, bounds="foo"
)
with raises(TypeError):
nf = NormalizingFlow(
arch_type, D, num_stages, num_layers, num_units, random_seed=1.0
)
# Check that q0 has correct statistics
nf = NormalizingFlow(arch_type, D, num_stages, num_layers, num_units)
z = nf.q0.sample(100000).numpy()
assert np.isclose(np.mean(z, 0), np.zeros((D,)), atol=1e-2).all()
assert np.isclose(np.cov(z.T), np.eye(D), atol=1e-1).all()
return None
def test_NormalizingFlow_call():
D = 4
num_stages = 1
num_layers = 2
num_units = 15
N = 100
# Check that
# arch_types = ["autoregressive", "coupling"]
arch_types = ["coupling"]
# stage_bijectors = [tfp.bijectors.MaskedAutoregressiveFlow, tfp.bijectors.RealNVP]
stage_bijectors = [tfp.bijectors.RealNVP]
for arch_type, stage_bijector in zip(arch_types, stage_bijectors):
nf = NormalizingFlow(arch_type, D, num_stages, num_layers, num_units)
z = nf(N)
bijectors = nf.trans_dist.bijector.bijectors
assert type(bijectors[1]) is stage_bijector
assert type(bijectors[0]) is tfp.bijectors.Chain
nf = NormalizingFlow(arch_type, D, 2, num_layers, num_units, batch_norm=True)
z = nf(N)
bijectors = nf.trans_dist.bijector.bijectors
assert type(bijectors[4]) is stage_bijector
assert type(bijectors[3]) is tfp.bijectors.ScaleMatvecLU
assert type(bijectors[2]) is epi.batch_norm.BatchNormalization
assert type(bijectors[1]) is stage_bijector
assert type(bijectors[0]) is tfp.bijectors.Chain
nf = NormalizingFlow(arch_type, D, 3, num_layers, num_units, batch_norm=True)
z = nf(N)
bijectors = nf.trans_dist.bijector.bijectors
assert type(bijectors[7]) is stage_bijector
assert type(bijectors[6]) is tfp.bijectors.ScaleMatvecLU
assert type(bijectors[5]) is epi.batch_norm.BatchNormalization
assert type(bijectors[4]) is stage_bijector
assert type(bijectors[3]) is tfp.bijectors.ScaleMatvecLU
assert type(bijectors[2]) is epi.batch_norm.BatchNormalization
assert type(bijectors[1]) is stage_bijector
assert type(bijectors[0]) is tfp.bijectors.Chain
x = nf.sample(5)
assert x.shape[0] == 5
assert x.shape[1] == D
return None
def test_to_string():
nf = NormalizingFlow("coupling", 4, 1, 2, 15)
assert nf.to_string() == "D4_C1_affine_L2_U15_bnmom=0.00E+00_PA_rs1"
nf = NormalizingFlow(
"coupling",
100,
2,
4,
200,
elemwise_fn="spline",
batch_norm=False,
random_seed=20,
)
assert nf.to_string() == "D100_C2_spline_L4_U200_bins=4_PA_rs20"
nf = NormalizingFlow("coupling", 4, 1, 2, 15, bn_momentum=0.999, post_affine=False)
assert nf.to_string() == "D4_C1_affine_L2_U15_bnmom=9.99E-01_rs1"
nf = NormalizingFlow(
"autoregressive", 4, 1, 2, 15, batch_norm=False, post_affine=False
)
assert nf.to_string() == "D4_AR1_affine_L2_U15_rs1"
nf = NormalizingFlow(
"autoregressive", 4, 4, 2, 15, batch_norm=False, post_affine=False
)
assert nf.to_string() == "D4_AR4_affine_L2_U15_rs1"
from scipy.special import expit
def interval_flow_np(x, lb, ub):
def softplus(x):
return np.log(1 + np.exp(-np.abs(x))) + max(0.0, x)
D = x.shape[0]
y = np.zeros((D,))
ldj = 0.0
for i in range(D):
x_i = x[i]
lb_i = lb[i]
ub_i = ub[i]
has_lb = not np.isneginf(lb_i)
has_ub = not np.isposinf(ub_i)
if has_lb and has_ub:
m = ub_i - lb_i
c = lb_i
y[i] = m * expit(x_i) + c
ldj += np.log(m) + np.log(expit(x_i) + EPS) + np.log(expit(-x_i))
elif has_lb:
y[i] = softplus(x_i) + lb_i
ldj += np.log(1.0 / (1.0 + np.exp(-x_i)) + EPS)
elif has_ub:
y[i] = -softplus(x_i) + ub_i
ldj += x_i - softplus(x_i)
else:
y[i] = x_i
return y, ldj
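# Worked example for interval_flow_np above, assuming toy bounds and input;
# the _example_* helper name is hypothetical.
def _example_interval_flow_np():
    lb = np.array([0.0, -1.0])
    ub = np.array([1.0, 0.0])
    x = np.array([0.3, -0.7])
    y, ldj = interval_flow_np(x, lb, ub)
    # y ~ [0.574, -0.668] lies inside the box; ldj sums log|dy_i/dx_i| over dims
    return y, ldj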
def test_interval_flow():
N = 10
Ds = [2, 4, 10, 15]
rtol = 1e-1
np.random.seed(0)
tf.random.set_seed(0)
lb = np.array([float("-inf"), float("-inf"), -100.0, 20.0])
ub = np.array([float("inf"), 100.0, 30.0, float("inf")])
IF = IntervalFlow(lb, ub)
x = np.random.normal(0.0, 2.0, (N, 4)).astype(np.float32)
y, ldj = IF.forward_and_log_det_jacobian(tf.constant(x))
x_inv = IF.inverse(y)
ildj = IF.inverse_log_det_jacobian(y, 1)
assert np.isclose(x_inv, x, rtol=rtol).all()
assert np.isclose(ldj, -ildj, rtol=rtol).all()
for i in range(N):
y_np, ldj_np = interval_flow_np(x[i], lb, ub)
assert np.isclose(y[i], y_np, rtol=rtol).all()
assert np.isclose(ldj[i], ldj_np, rtol=rtol)
for D in Ds:
lb = np.array(D * [float("-inf")])
ub = np.array(D * [float("inf")])
IF = IntervalFlow(lb, ub)
x = np.random.normal(0.0, 10.0, (N, D)).astype(np.float32)
y, ldj = IF.forward_and_log_det_jacobian(tf.constant(x))
x_inv = IF.inverse(y)
ildj = IF.inverse_log_det_jacobian(y, 1)
assert np.isclose(x_inv, x, rtol=rtol).all()
assert np.isclose(ldj, -ildj, rtol=rtol).all()
for i in range(N):
y_np, ldj_np = interval_flow_np(x[i], lb, ub)
assert np.isclose(y[i], y_np, rtol=rtol).all()
assert np.isclose(ldj[i], ldj_np, rtol=rtol)
lb = np.random.uniform(-1000, 1000, (D,))
ub = np.array(D * [float("inf")])
IF = IntervalFlow(lb, ub)
x = np.random.normal(0.0, 3.0, (N, D)).astype(np.float32)
y, ldj = IF.forward_and_log_det_jacobian(tf.constant(x))
x_inv = IF.inverse(y)
ildj = IF.inverse_log_det_jacobian(y, 1)
assert np.isclose(x_inv, x, rtol=rtol).all()
assert np.isclose(ldj, -ildj, rtol=rtol).all()
for i in range(N):
y_np, ldj_np = interval_flow_np(x[i], lb, ub)
assert np.isclose(y[i], y_np, rtol=rtol).all()
assert np.isclose(ldj[i], ldj_np, rtol=rtol)
lb = np.array(D * [float("-inf")])
ub = np.random.uniform(-1000, 1000, (D,))
IF = IntervalFlow(lb, ub)
x = np.random.normal(0.0, 3.0, (N, D)).astype(np.float32)
y, ldj = IF.forward_and_log_det_jacobian(tf.constant(x))
x_inv = IF.inverse(y)
x_inv_fwd = IF.forward(x_inv)
ildj = IF.inverse_log_det_jacobian(y, 1)
assert np.isclose(x_inv, x, rtol=rtol).all()
assert np.isclose(x_inv_fwd, y, rtol=rtol).all()
assert np.isclose(ldj, -ildj, rtol=rtol).all()
for i in range(N):
y_np, ldj_np = interval_flow_np(x[i], lb, ub)
assert np.isclose(y[i], y_np, rtol=rtol).all()
assert np.isclose(ldj[i], ldj_np, rtol=rtol)
lb = np.random.uniform(-10, -1, (D,))
ub = np.random.uniform(1, 10, (D,))
IF = IntervalFlow(lb, ub)
x = np.random.normal(0.0, 2.0, (N, D)).astype(np.float32)
y, ldj = IF.forward_and_log_det_jacobian(tf.constant(x))
x_inv = IF.inverse(y)
ildj = IF.inverse_log_det_jacobian(y, 1)
assert np.isclose(x_inv, x, rtol=rtol).all()
assert np.isclose(ldj, -ildj, rtol=rtol).all()
for i in range(N):
y_np, ldj_np = interval_flow_np(x[i], lb, ub)
assert np.isclose(y[i], y_np, rtol=rtol).all()
assert np.isclose(ldj[i], ldj_np, rtol=rtol)
with raises(TypeError):
IF = IntervalFlow("foo", ub)
with raises(TypeError):
IF = IntervalFlow(lb, "foo")
with raises(ValueError):
IF = IntervalFlow(lb, ub[:3])
tmp = ub[2]
ub[2] = lb[2]
lb[2] = tmp
with raises(ValueError):
IF = IntervalFlow(lb, ub)
D = 2
lb = [0.0, -1.0]
ub = [1.0, 0.0]
IF = IntervalFlow(lb, ub)
x = np.random.normal(0.0, 1.0, (N, D)).astype(np.float32)
y, ldj = IF.forward_and_log_det_jacobian(tf.constant(x))
x_inv = IF.inverse(y)
ildj = IF.inverse_log_det_jacobian(y, 1)
assert np.isclose(x_inv, x, rtol=rtol).all()
assert np.isclose(ldj, -ildj, rtol=rtol).all()
for i in range(N):
y_np, ldj_np = interval_flow_np(x[i], lb, ub)
assert np.isclose(y[i], y_np, rtol=rtol).all()
return None
def test_initialization():
D = 4
nf = NormalizingFlow("coupling", D, 2, 2, 15, batch_norm=False, post_affine=True)
mu = -0.5 * np.ones((D,))
Sigma = 2.0 * np.eye(D)
nf.initialize(mu, Sigma, num_iters=int(5e3), verbose=True)
z = nf.sample(int(1e4))
z = z.numpy()
mean_z = np.mean(z, 0)
Sigma_z = np.cov(z.T)
assert np.isclose(mean_z, mu, atol=0.5).all()
assert np.isclose(Sigma_z, Sigma, atol=0.5).all()
import pandas as pd
import numpy as np
import math, statistics, functools
import matplotlib.pyplot as plt
from sklearn.metrics.cluster import adjusted_rand_score
def read_csv_input(filename):
df = pd.read_csv(filename, header = None).to_numpy()
y = df[:, [0]]
X = df[:, range(1, df.shape[1])]
return X, y
def normalize(X):
means = np.mean(X, axis=0)
stds = np.std(X, axis=0)
return (X-means)/stds, means, stds
def kmeans_random_init(n, k, low=-3, high=3):
return np.random.uniform(low, high, k*n).reshape(k, n)
def gmm_random_init(m, n, k):
means = np.random.multivariate_normal(np.zeros(n), np.identity(n), k)
return means
def kmeans_plus_plus_init(X, k):
m, n = X.shape
means = np.zeros((k, n))
means[0, :] = X[np.random.choice(m), :]
for i in range(1, k):
dis = distance(X, means[:i, :])
nearest_dis_sq = np.power(np.min(dis, axis=1), 2)
pi = nearest_dis_sq / np.sum(nearest_dis_sq)
lucky = np.random.choice(np.arange(m), 1, p=pi)
means[i, :] = X[lucky, :]
return means
def calc_probs(X, means, cov_matrix):
m, n = X.shape
X_diff = X[None, :, :] - means[:, None, :]
var = np.sum(np.matmul(X_diff, np.linalg.inv(cov_matrix)) * X_diff, axis = -1)
numer = np.exp(-0.5 * var).T
denom = ((2*np.pi)**(n/2))*(np.linalg.det(cov_matrix)**(0.5))
return numer/denom
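# Quick sanity sketch for calc_probs above, assuming a single standard 2D
# Gaussian component; the _example_* helper name is hypothetical. The density
# at the mean should be 1 / (2*pi) ~ 0.159.
def _example_calc_probs():
    X = np.zeros((1, 2))
    means = np.zeros((1, 2))
    cov_matrix = np.identity(2)[None, :, :]
    return calc_probs(X, means, cov_matrix)  # shape (1, 1), value ~ 0.159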
def e_step(X, means, cov_matrix, lams):
probs = calc_probs(X, means, cov_matrix)
lam_probs = probs * lams
q = lam_probs / np.sum(lam_probs, axis = 1, keepdims=True)
return q
def m_step(X, q):
m, n = X.shape
_, k = q.shape
q_3d = np.broadcast_to(q, (1, m, k)).T
X_3d = np.broadcast_to(X, (k, m, n))
numer = np.sum(q_3d * X_3d, axis=1)
denom = np.sum(q_3d, axis=1)
means = numer/denom
X_diff = X_3d - means[:, None, :]
cov_matrix = np.matmul(np.transpose(q_3d * X_diff, (0, 2, 1)), X_diff)/denom[:, :, None]
cov_matrix += (np.identity(n)*1e-6)[None, :, :]
lams = np.sum(q, axis=0)/m
return means, cov_matrix, lams
def gmm(X, k, init_means):
m, n = X.shape
means = init_means
cov_matrix = np.broadcast_to(np.identity(n), (k, n, n))  # target shape inferred from calc_probs/m_step
"""
The module 'solver' contains the LeeWaveSolver class, which allows solution of the 2D, steady, linear, lee wave problem.
"""
import numpy as np
from numpy import pi
import matplotlib.pyplot as plt
import xarray as xr
import scipy.fft as fft
import cmocean
class LeeWaveSolver:
"""
A 2D, steady, linear solver for lee waves.
Given user preferences of domain size, topography, background flow, upper boundary type, rotation,
viscosity/diffusivity, and hydrostatic/nonhydrostatic, LeeWaveSolver solves the linear lee wave problem and finds
the wave variables and energy diagnostics.
Attributes
----------
nx : int
Grid size in the horizontal x-direction. Set in class initialisation. Should be even, default: 800
nz : int
Grid size in the vertical z-direction. Set in class initialisation. Should be odd, default: 201
nm : int
Number of modes to be used in Galerkin solver. Set in class initialisation. Should be less than nz, default: 200
H : int
Fluid depth in metres. Set in class initialisation. Default 3000.
L : int
Horizontal domain half-length in metres. Set in class initialisation. Default 20000.
dx : float
Horizontal grid spacing. Set by solver and shouldn't be changed.
x : np.ndarray
Horizontal x coordinate, length nx. Set by solver and shouldn't be changed.
dz : float
Vertical grid spacing. Set by solver and shouldn't be changed.
z : np.ndarray
Vertical z coordinate, length nz. Set by solver and shouldn't be changed.
h_topo : np.ndarray
Topographic height, length nx. Set by method set_topo(), defaults to Gaussian.
U : np.ndarray
Vertical profile of background flow speed, length nz. Set by method set_mean_velocity().
Defaults to a uniform 0.1 m/s.
U_type : str
Type of background flow speed, 'Uniform', 'Linear', or 'Custom'. Set by parameter to set_mean_velocity(),
defaults to 'Uniform'.
N : np.ndarray
Vertical profile of background buoyancy frequency N (stratification), length nz. Set by method
set_mean_stratification(). Defaults to a uniform 0.001 s^{-1}.
uniform_mean : bool
True if background flows are uniform (speeds up solver). False otherwise. Set internally.
f : float
Coriolis parameter (s^{-1}). Set as a parameter to solve(), defaults to 0.
Ah : float
Horizontal Laplacian viscosity (m^2/s). Set as a parameter to solve(), defaults to 1 m^2/s.
Dh : float
Horizontal Laplacian diffusivity (m^2/s). Set as a parameter to solve(), defaults to Ah.
rho_0 : float
Reference density (kg/m^3). Set in initialisation of class. Default 1027 kg/m^3.
hydrostatic : bool
True if using hydrostatic approximation, False otherwise. Set as a parameter to solve(), defaults to False.
open_boundary : bool
True if using a freely radiating upper boundary, False for a rigid lid boundary condition. Set as a parameter
to solve(), defaults to True.
wave_fields : xarray.core.dataset.Dataset
Dataset created by method solve() containing solution wave fields and background flow.
None until solve() is called
diags : xarray.core.dataset.Dataset
Dataset created by method solve() containing 1D and 2D energy diagnostics.
None until solve() is called
Methods
-------
set_topo(topo_type='Gaussian', h0=50, width=1000, k_topo=2 * pi / 5000, k_max=0.01, k_min=0.001,
K0=2.3e-4, L0=1.3e-4, mu=3.5, h_input=None)
Sets the topography according to user preferences.
set_mean_velocity(U_type='Uniform', U_0=0.1, U_H=0.3, U_input=None)
Sets the mean (background) vertical velocity profile according to user preferences.
set_mean_stratification(N_type='Uniform', N_0=0.001, N_H=0.003, N_input=None)
Sets the mean (background) vertical buoyancy frequency (N) profile according to user preferences.
plot_inputs()
Show a plot of the topography, background velocity and stratification profiles.
solve(f=0, open_boundary=True, hydrostatic=True, Ah=1.0, Dh=None)
Solves the given lee wave problem.
plot(array)
Custom plotting function to easily create 1D profiles or 2D pcolormesh plots of an field from wave_fields or
diags
"""
def __init__(self, nx=800, nz=201, nm=200, H=3000, L=20000, rho_0=1027.):
"""
Parameters
----------
nx : int
Grid size in the horizontal x-direction. Should be even, default: 800
nz : int
Grid size in the vertical z-direction. Should be odd, default: 201
nm : int
Number of modes to be used in Galerkin solver. Should be less than nz, default: 200
H : int
Fluid depth in metres, default 3000.
L : int
Horizontal domain half-length in metres, default 20000.
rho_0 : float
Reference density (kg/m^3), default 1027 kg/m^3.
"""
if nx % 2 != 0 or nz % 2 != 1:
raise ValueError('nx should be even and nz should be odd')
# Initialise attributes
self.nx = nx
self.nz = nz
self.nm = nm
self.H = H
self.L = L
self.dx = 2 * self.L / self.nx
self.x = np.linspace(-self.L, self.L - self.dx, self.nx)
        self.dz = self.H / (self.nz - 1)  # grid spacing of np.linspace(0, H, nz)
self.z = np.linspace(0, self.H, self.nz)
self.h_topo = None
self.U = 0.1 * np.ones_like(self.z)
self.U_type = 'Uniform'
self.N = 0.001 * np.ones_like(self.z)
self.uniform_mean = True
self.f = 0.
self.Ah = 0.
self.Dh = 0.
self.rho_0 = float(rho_0)
self.hydrostatic = False
self.open_boundary = True
self.wave_fields = None
self.diags = None
# Call methods to initialise topography and background flow to default.
self.set_topo()
self.set_mean_velocity()
self.set_mean_stratification()
def set_topo(self, topo_type='Gaussian', h0=50., width=1000, k_topo=2 * pi / 5000, k_max=0.01, k_min=0.001,
K0=2.3e-4, L0=1.3e-4, mu=3.5, h_input=None):
"""
Finds topography given by user preference and sets internally as an attribute to the solver.
Parameters
----------
topo_type : str
Type of topography to create.
- 'Gaussian' creates h(x) = h0*exp(-x**2/width**2), uses only h0 and width
- 'WitchOfAgnesi' creates h(x) = h0/(1 + x**2/width**2), uses only h0 and width
            - 'Monochromatic' creates h(x) = h0*cos(k_topo*x), uses only h0 and k_topo. If a whole number of
              wavelengths doesn't fit in the horizontal domain, the domain half-length L and x will be adjusted.
- 'GJ98' creates topography according to the Goff & Jordan (1998) theoretical spectrum. The 2D spectrum is
              integrated over cross-stream wavenumbers l to get a 1D spectrum. k_min is the minimum wavenumber cutoff,
              k_max is the maximum wavenumber cutoff, K0 is the rolloff k-wavenumber, L0 is the rolloff l-wavenumber,
mu/2 is the 'slope'. Default parameters are as described for the Drake Passage in
Nikurashin & Ferrari (2010b). The resulting topography is normalised so that the RMS height is h0.
            - 'Custom' allows the user to input a topographic profile h(x) in h_input; it must have the same shape as x.
h0 : float
Topographic height (m), used in 'Gaussian', 'WitchOfAgnesi', 'Monochromatic', and 'GJ98' topography types.
Defaults to 50m.
width : float
Topographic width (m), used in 'Gaussian' and 'WitchOfAgnesi' topography types. Defaults to 1000m.
k_topo : float
Topographic wavenumber (rad/m), used in 'Monochromatic' topography. Defaults to 2*pi/5000 rad/m.
        k_max : float
            Maximum wavenumber cutoff (rad/m), used in 'GJ98' topography. Usually N/U. Defaults to 0.01 rad/m.
        k_min : float
            Minimum wavenumber cutoff (rad/m), used in 'GJ98' topography. Usually f/U. Defaults to 0.001 rad/m.
        K0 : float
            Rolloff wavenumber in x, used in 'GJ98' topography. Defaults to 2.3e-4 rad/m.
        L0 : float
            Rolloff wavenumber in y, used in 'GJ98' topography. Defaults to 1.3e-4 rad/m.
        mu : float
            Slope parameter, used in 'GJ98' topography. Defaults to 3.5.
h_input : np.ndarray
User input topography, length nx. Used in 'Custom' topography
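
        Examples
        --------
        Illustrative calls, assuming a previously constructed solver; the values
        are illustrative assumptions.

        >>> solver.set_topo(topo_type='Monochromatic', h0=25., k_topo=2 * pi / 2000)
        >>> solver.set_topo(topo_type='Custom', h_input=50. * np.exp(-solver.x ** 2 / 2000. ** 2))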
"""
if topo_type == 'Gaussian':
if h0 > self.H:
raise ValueError('Topography height should be less than domain height')
elif width > self.L / 5:
raise ValueError('Topography width is too large compared to the length of domain')
self.h_topo = h0 * np.exp(-self.x ** 2 / width ** 2)
elif topo_type == 'WitchOfAgnesi':
if h0 > self.H:
raise ValueError('Topography height should be less than domain height')
elif width > self.L / 5:
raise ValueError('Topography width is too large compared to the length of domain')
self.h_topo = h0 / (1 + self.x ** 2 / width ** 2)
elif topo_type == 'Monochromatic':
if h0 > self.H:
raise ValueError('Topography height should be less than domain height')
elif k_topo < 2 * pi / self.L:
                raise ValueError('Topographic wavelength is too large compared to the length of domain')
lam = 2 * pi / k_topo
n = self.L / lam
if round(n) != n:
self.L = lam * round(n)
self.dx = 2 * self.L / self.nx
self.x = np.linspace(-self.L, self.L - self.dx, self.nx)
warnings.warn(
f'Domain width L has been adjusted to {self.L:.2f}m to allow topography with wavelength {lam:.2f}m')
self.h_topo = h0 * np.cos(self.x * k_topo)
elif topo_type == 'GJ98':
if h0 > self.H:
raise ValueError('Topography height should be less than domain height')
self.h_topo = self.__GJ98topo(h0, k_max, k_min, K0, L0, mu)
elif topo_type == 'Custom':
if h_input is None:
raise ValueError('Topography needs to be given in \'h_input\'')
elif len(h_input) != len(self.x):
raise ValueError('\'h_input\' should be the same length as x (solver.x)')
self.h_topo = h_input
self.h_topo -= np.min(self.h_topo)
def set_mean_velocity(self, U_type='Uniform', U_0=0.1, U_H=0.3, U_input=None):
"""
Finds velocity profile given by user preference and sets internally as an attribute to the solver.
Parameters
----------
U_type: str
Type of background velocity profile to create
- 'Uniform' creates U(z) as a uniform profile of length nz with value U_0. Uses only U_0.
- 'Linear' creates U(z) as a linear profile of length nz with bottom value U_0 and surface value U_H.
Uses U_0 and U_H.
- 'Custom' creates a user input velocity profile given by U_input. Must be of length nz. For a strictly 2D
background flow, geostrophic balance requires fU_zz = 0. Be aware of this if you choose a non-constant shear.
U_0 : float
Bottom flow speed (m/s). Used in 'Uniform' and 'Linear' profiles, defaults to 0.1m/s.
U_H : float
Surface flow speed (m/s). Used in 'Linear' profile, defaults to 0.3m/s.
U_input: np.ndarray
User input velocity profile, length nz, used in 'Custom' type.
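
        Examples
        --------
        Illustrative calls on an existing solver (values are assumptions):

        >>> solver.set_mean_velocity(U_type='Linear', U_0=0.05, U_H=0.3)
        >>> solver.set_mean_velocity(U_type='Custom', U_input=0.1 + 0.2 * solver.z / solver.H)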
"""
self.U_type = U_type
if U_type == 'Uniform':
self.U = U_0 * np.ones_like(self.z)
elif U_type == 'Linear':
self.uniform_mean = False
self.U = U_0 + (U_H - U_0) / self.H * self.z
elif U_type == 'Custom':
self.uniform_mean = False
if U_input is None:
raise ValueError('U needs to be given in \'U_input\'')
elif len(U_input) != len(self.z):
raise ValueError('\'U_input\' should be the same length as z (solver.z)')
self.U = U_input
def set_mean_stratification(self, N_type='Uniform', N_0=0.001, N_H=0.003, N_input=None):
"""
Finds buoyancy frequency profile given by user preference and sets internally as an attribute to the solver.
Parameters
----------
N_type: str
Type of background stratification profile to create
- 'Uniform' creates N(z) as a uniform profile of length nz with value N_0. Uses only N_0.
- 'Linear' creates N(z) as a linear profile of length nz with bottom value N_0 and surface value N_H.
Uses N_0 and N_H.
- 'Custom' creates a user input stratification profile given by N_input. Must be of length nz.
N_0 : float
Bottom buoyancy frequency (s^{-1}). Used in 'Uniform' and 'Linear' profiles, defaults to 0.001s^{-1}.
N_H : float
Surface buoyancy frequency (s^{-1}). Used in 'Linear' profile, defaults to 0.003s^{-1}.
N_input: np.ndarray
User input buoyancy frequency profile, length nz, used in 'Custom' type.
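
        Examples
        --------
        An illustrative call on an existing solver (the profile is an assumption):

        >>> solver.set_mean_stratification(N_type='Custom', N_input=0.003 * np.exp(-solver.z / 1000.))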
"""
if N_type == 'Uniform':
self.N = N_0 * np.ones_like(self.z)
elif N_type == 'Linear':
self.uniform_mean = False
self.N = N_0 + (N_H - N_0) / self.H * self.z
elif N_type == 'Custom':
self.uniform_mean = False
if N_input is None:
raise ValueError('N needs to be given in \'N_input\'')
elif len(N_input) != len(self.z):
raise ValueError('\'N_input\' should be the same length as z (solver.z)')
self.N = N_input
def plot_inputs(self):
""" Show a plot of the topography, background velocity and stratification profiles."""
plt.rcParams.update({'font.size': 20})
fig, axes = plt.subplots(1, 3, gridspec_kw={'width_ratios': [3, 1, 1]}, figsize=(30, 7))
axes[0].fill(np.append(np.insert(self.x, 0, -self.L), self.L),
np.append(np.insert(self.h_topo, 0, np.min(self.h_topo)), np.min(self.h_topo)), 'k')
axes[0].set_xlabel('x [m]')
axes[0].set_ylabel('Height above bottom [m]')
axes[0].set_ylim([0, self.H])
axes[0].set_xlim([-self.L, self.L])
axes[1].plot(self.U, self.z, 'r', linewidth=3)
axes[1].set_xlabel('U [m/s]')
# axes[1].set_ylabel('Height above bottom [m]')
axes[1].set_ylim([0, self.H])
axes[2].plot(self.N, self.z, 'b', linewidth=3)
axes[2].set_xlabel('N [1/s]')
# axes[2].set_ylabel('Height above bottom [m]')
axes[2].set_ylim([0, self.H])
fig.suptitle('Topography and background flow input to the lee wave solver')
plt.show()
def solve(self, f=0, open_boundary=True, hydrostatic=True, Ah=1.0, Dh=None):
"""
Method to solve linear lee wave problem with the previously set/ default flow and domain. Creates the solver
attributes 'wave_fields' and 'diags'.
Parameters
----------
f : float
Coriolis parameter (s^{-1}), defaults to 0.
open_boundary : bool
True if using a freely radiating upper boundary, False for a rigid lid boundary condition. Defaults to True.
hydrostatic : bool
            True if using hydrostatic approximation, False otherwise. Defaults to True.
Ah : float
Horizontal Laplacian viscosity (m^2/s). Defaults to 1 m^2/s.
Dh : float
Horizontal Laplacian diffusivity (m^2/s). Defaults to Ah.
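
        Examples
        --------
        An illustrative call on a configured solver (values are assumptions):

        >>> solver.solve(f=1e-4, open_boundary=True, hydrostatic=False, Ah=1.0)
        >>> w = solver.wave_fields['w']  # the variable name 'w' is an assumption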
"""
self.f = float(f)
self.open_boundary = open_boundary
self.hydrostatic = hydrostatic
self.Ah = float(Ah)
self.Dh = float(Dh) if Dh is not None else float(Ah)
# First check the inputs are consistent, raise errors or warn if not:
self.__check_inputs()
# Find the transformed topography and truncated and full wavenumber vectors
k_full, k_trunc, h_topo_hat, h_topo_hat_trunc = self.__transform_topo()
# Define the coefficients of the ODE
P, Q = self.__ODEcoeffs(k_trunc)
# Solve for the Fourier transformed wave fields
psi_hat, u_hat, v_hat, w_hat, b_hat, p_hat = self.__fourier_solve(k_full, k_trunc, h_topo_hat_trunc, P, Q)
# Invert to give the real space wave fields
psi = self.__inverse_transform(psi_hat)
u = self.__inverse_transform(u_hat)
v = self.__inverse_transform(v_hat)
w = self.__inverse_transform(w_hat)
b = self.__inverse_transform(b_hat)
p = self.__inverse_transform(p_hat)
# Get 2D background fields
if self.uniform_mean:
U_2D = np.ones((self.nx, self.nz)) * self.U
N2_2D = np.ones((self.nx, self.nz)) * self.N ** 2
B_2D = np.cumsum(N2_2D, 1) * self.dz
else:
U_2D = np.tile(np.expand_dims(self.U, 0), [self.nx, 1])
N2_2D = np.tile(np.expand_dims(self.N ** 2, 0), [self.nx, 1])
B_2D = np.cumsum(N2_2D, 1) * self.dz
        # Package everything into datasets for output
self.wave_fields = self.__make_wave_fields_dataset(k_full, psi, u, v, w, b, p, h_topo_hat, psi_hat, u_hat,
v_hat, w_hat, b_hat, p_hat, U_2D, B_2D, N2_2D)
self.diags = self.__make_diags_dataset()
    def plot(self, array, vmin='default', vmax='default'):
"""
Simple plotting function to plot output variables from solver.
Uses matplotlib.pyplot and xarray plotting functionality. If array given is 1D, creates a line plot.
If array is 2D, creates a pcolormesh plot, with topography shown.
Parameters
----------
        array : xarray.core.dataarray.DataArray
            Output array created from calling solve(). Can be an array from 'wave_fields' or 'diags'.
        vmin, vmax : float, optional
            Colour limits for 2D plots; default to the minimum and maximum of the array.
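
        Examples
        --------
        Illustrative calls; the variable name 'w' is an assumption.

        >>> solver.plot(solver.wave_fields['w'])             # 2D field -> pcolormesh
        >>> solver.plot(solver.wave_fields['w'].isel(x=0))   # 1D profile -> line plot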
"""
plt.rcParams.update({'font.size': 14})
if vmin == 'default':
vmin = array.min().values
if vmax == 'default':
vmax = array.max().values
if len(array.dims) == 0:
            print('Needs at least one dimension to make a plot')
elif len(array.dims) == 1:
fig, ax = plt.subplots(1, 1, figsize=(5, 10))
array.plot(y='z', linewidth=2, ax=ax)
else:
fig, ax = plt.subplots(1, 1, figsize=(15, 7))
if (array.min() < 0) & (array.max() > 0):
# Want a symmetric colormap
cmap = cmocean.cm.balance
array.plot(y='z', ax=ax, cmap=cmap)
ax.fill(np.append(np.insert(self.x, 0, -self.L), self.L),
np.append(np.insert(self.h_topo, 0, np.min(self.h_topo)), np.min(self.h_topo)), 'k')
ax.set_ylim([np.min(self.h_topo), self.H])
else:
cmap = cmocean.cm.thermal
array.plot(y='z', ax=ax, cmap=cmap, vmin=vmin, vmax=vmax)
ax.fill(np.append(np.insert(self.x, 0, -self.L), self.L),
np.append(np.insert(self.h_topo, 0, np.min(self.h_topo)), np.min(self.h_topo)), 'k')
ax.set_ylim([np.min(self.h_topo), self.H])
plt.show()
def __GJ98topo(self, h0=25, k_max=0.01, k_min=0.001, K0=2.3e-4, L0=1.3e-4, mu=3.5):
""" Called by set_topo(), creates a realisation of the Goff Jordan (1998) theoretical topography spectrum."""
# Define spectral vectors:
l = pi / self.L * np.arange(-self.nx / 2, self.nx / 2)
k = pi / self.L * np.arange(-self.nx / 2, self.nx / 2)
lm, km = np.meshgrid(l, k)
# Define unscaled power spectrum:
P = (1 + km ** 2 / K0 ** 2 + lm ** 2 / L0 ** 2) ** (-mu / 2)
P1D = 1 / 2 / self.L * np.sum(P, axis=1)
# Define random phases
phase = 2 * pi * np.random.rand(k.shape[0])
# Define transformed topography
        h_topo_hat = np.sqrt(np.abs(P1D)) * np.exp(1j * phase)
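        # NOTE: the original method body is truncated at this point. What follows
        # is a minimal, hedged completion: invert the (fftshift-ordered) spectrum
        # to physical space and normalise the RMS height to h0, as described in
        # the set_topo() docstring. The original inversion and scaling may differ.
        h_topo = np.real(np.fft.ifft(np.fft.ifftshift(h_topo_hat)))
        h_topo = h_topo * h0 / np.sqrt(np.mean(h_topo ** 2))
        return h_topo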
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
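
Examples
--------
Both the integer number and the Hermann-Mauguin symbol key the same object:

>>> sg = space_groups[19]
>>> sg is space_groups['P 21 21 21']
True
>>> len(sg)
4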
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
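        # transposed_rotations map a reflection hkl to its symmetry equivalents
        # (hkl is treated as a row vector); phase_factors hold the componentwise
        # factors exp(-2*pi*i*t) for the fractional translation t = tn/td. Both
        # are combined per reflection in symmetryEquivalentMillerIndices().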
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
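
        Examples
        --------
        A sketch for the two-operation group P 1 2 1 defined below:

        >>> sg = space_groups['P 1 2 1']
        >>> hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
        >>> hkls.shape, phases.shape
        ((2, 3), (2,))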
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
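# Space group 43 (F d d 2)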
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
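# Space group 44 (I m m 2); the base operations recur shifted by the
# I-centring translation (1/2,1/2,1/2)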
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
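# Space group 45 (I b a 2)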
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
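# Space group 46 (I m a 2)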
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
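# Space group 47 (P m m m)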
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
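# Space group 48 (P n n n :2, origin choice 2)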
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
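# Space group 49 (P c c m)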
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
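# Space group 50 (P b a n :2, origin choice 2)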
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
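# Space group 51 (P m m a)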
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
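# Space group 52 (P n n a)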
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
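# Space group 53 (P m n a)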
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
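# Space group 54 (P c c a)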
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
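# Space group 55 (P b a m)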
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
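# Space group 56 (P c c n)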
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
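# Space group 57 (P b c m)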
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
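# Space group 58 (P n n m)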
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
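# Space group 59 (P m m n :2, origin choice 2)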
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
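# Space group 60 (P b c n)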
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
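# Space group 61 (P b c a)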
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
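# Space group 62 (P n m a)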
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
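# Space group 63 (C m c m); the base operations recur shifted by the
# C-centring translation (1/2,1/2,0)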
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
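# Space group 64 (C m c a)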
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
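# Space group 65 (C m m m)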
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
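# Space group 66 (C c c m)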
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
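# Space group 67 (C m m a)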
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
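# Space group 68 (C c c a :2, origin choice 2)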
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
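# Space group 69 (F m m m)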
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
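# Space group 70 (F d d d :2, origin choice 2)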
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
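# Note on the data layout (an observation about the table as generated, not
# generator documentation): body-centered (I) groups such as I m m m list
# every operation twice, the second copy with (1/2, 1/2, 1/2) added to the
# translation, while face-centered (F) groups such as F m m m above repeat
# the operations with (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0). The sums are
# left unreduced, so translation components equal to 1 or larger fractions
# like 5/4 appear and are equivalent modulo a lattice vector.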
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
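# Usage sketch (illustrative, hypothetical helper): expand a fractional
# position into its symmetry-equivalent positions under a list of
# (rot, trans_num, trans_den) tuples such as the P 4 list just registered.
# The operation list is passed in directly so nothing is assumed about the
# SpaceGroup class API defined earlier in this module.
def _equivalent_positions(ops, point):
    positions = []
    for rot, trans_num, trans_den in ops:
        # x' = rot.x + trans_num/trans_den, folded back into [0, 1)
        x = N.dot(rot, point) + 1.0 * trans_num / trans_den
        positions.append(x - N.floor(x))
    return positions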
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
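# Sanity-check sketch (hypothetical; assumes N is NumPy so that N.linalg is
# available, whereas under the older Numeric package the determinant lives
# in the separate LinearAlgebra module): every rotation part in these tables
# is an integer matrix with determinant +1 (proper rotation) or -1
# (rotoinversion or mirror), which a quick assertion can verify.
def _check_rotation_parts(ops):
    for rot, trans_num, trans_den in ops:
        d = int(round(N.linalg.det(1.0 * rot)))
        assert d in (1, -1), "rotation part must have determinant +/-1"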
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
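# Observation: P 4 m m is symmorphic, so all eight of its operations carry
# zero translation. The related groups that follow (P 4 b m, P 42 c m,
# P 42 n m, P 4 c c, ...) share the same rotation parts and differ only in
# their trans_num/trans_den entries, which encode the glide and screw
# components.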
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
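# Space group 104: P 4 n c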
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
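# Space group 105: P 42 m c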
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
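# Space group 106: P 42 b c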
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
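# Space group 107: I 4 m m (body-centred: the second half of the operations
# repeats the first half with the (1/2,1/2,1/2) centring translation added)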
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
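# Space group 108: I 4 c m (body-centred)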
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
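# Space group 109: I 41 m d (body-centred)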
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
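# Space group 110: I 41 c d (body-centred)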
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
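# Space group 111: P -4 2 m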
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
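# Space group 112: P -4 2 c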
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
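# Space group 113: P -4 21 m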
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
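# Space group 114: P -4 21 c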
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
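# Space group 115: P -4 m 2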
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
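# Space group 116: P -4 c 2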
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
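# Space group 117: P -4 b 2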
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
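# Space group 118: P -4 n 2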
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
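# Space group 119: I -4 m 2 (body-centred)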
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
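# Space group 120: I -4 c 2 (body-centred)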
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
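# Space group 121: I -4 2 m (body-centred)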
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
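# Space group 122: I -4 2 d (body-centred)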
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
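# Space group 123: P 4/m m m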
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
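# Space group 124: P 4/m c c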
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
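# Space group 125: P 4/n b m :2 (origin choice 2, inversion centre at the origin)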
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
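# Space group 126: P 4/n n c :2 (origin choice 2)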
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
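# Space group 127: P 4/m b m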
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
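# Space group 128: P 4/m n c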
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
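# Space group 129: P 4/n m m :2 (origin choice 2)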
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
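# Space group 130, 'P 4/n c c :2' (origin choice 2).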
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
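# Space group 131, 'P 42/m m c'.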
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
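# Space group 132, 'P 42/m c m'.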
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
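# Space group 133, 'P 42/n b c :2' (origin choice 2).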
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
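# Space group 134, 'P 42/n n m :2' (origin choice 2).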
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
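# Space group 135, 'P 42/m b c'.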
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
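# Space group 136, 'P 42/m n m'.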
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
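# Space group 137, 'P 42/n m c :2' (origin choice 2).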
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
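# Space group 138, 'P 42/n c m :2' (origin choice 2).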
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
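# Space group 139, 'I 4/m m m'. Body-centred: the second half of the
# operations repeats the first half with the (1/2,1/2,1/2) centring
# translation added (translations are not reduced modulo 1 here).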
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
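# Space group 140, 'I 4/m c m' (body-centred).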
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
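# Space group 141, 'I 41/a m d :2' (body-centred, origin choice 2).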
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
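# Space group 142, 'I 41/a c d :2' (body-centred, origin choice 2).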
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
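# Space group 143, 'P 3' (first of the trigonal groups).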
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
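# Space group 144, 'P 31' (3_1 screw axis: z translations of 1/3 and 2/3).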
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
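# Space group 145, 'P 32' (3_2 screw axis).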
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
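# Space group 146, 'R 3 :H' (rhombohedral group in the hexagonal-axes
# setting; centring translations (1/3,2/3,2/3) and (2/3,1/3,1/3)).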
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
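# Space group 147, 'P -3'.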
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
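# Space group 148, 'R -3 :H' (hexagonal-axes setting).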
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
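# Space group 149, 'P 3 1 2'.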
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
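# Space group 150, 'P 3 2 1'.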
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
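# No. 151: P 31 1 2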
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
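# No. 152: P 31 2 1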
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
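# No. 153: P 32 1 2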
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
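# No. 154: P 32 2 1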
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
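# No. 155: R 3 2 :H (rhombohedral lattice, hexagonal setting)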
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
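# No. 156: P 3 m 1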
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
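# No. 157: P 3 1 m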
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
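# No. 158: P 3 c 1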
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
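# No. 159: P 3 1 c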
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
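# No. 160: R 3 m :H (rhombohedral lattice, hexagonal setting)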
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
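# No. 161: R 3 c :H (rhombohedral lattice, hexagonal setting)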
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
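# No. 162: P -3 1 m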
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
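# No. 163: P -3 1 c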
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
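# No. 164: P -3 m 1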
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
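# No. 165: P -3 c 1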
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
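# No. 166: R -3 m :H (rhombohedral lattice, hexagonal setting)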
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
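# No. 167: R -3 c :H (rhombohedral lattice, hexagonal setting)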
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(167, 'R -3 c :H', transformations)
space_groups[167] = sg
space_groups['R -3 c :H'] = sg
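# No. 168: P 6 (first group of the hexagonal system; the trigonal groups end at No. 167)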
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(168, 'P 6', transformations)
space_groups[168] = sg
space_groups['P 6'] = sg
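# No. 169: P 61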
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(169, 'P 61', transformations)
space_groups[169] = sg
space_groups['P 61'] = sg
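# No. 170: P 65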
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(170, 'P 65', transformations)
space_groups[170] = sg
space_groups['P 65'] = sg
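# No. 171: P 62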
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(171, 'P 62', transformations)
space_groups[171] = sg
space_groups['P 62'] = sg
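# No. 172: P 64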
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(172, 'P 64', transformations)
space_groups[172] = sg
space_groups['P 64'] = sg
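# No. 173: P 63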
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(173, 'P 63', transformations)
space_groups[173] = sg
space_groups['P 63'] = sg
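# No. 174: P -6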
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(174, 'P -6', transformations)
space_groups[174] = sg
space_groups['P -6'] = sg
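# No. 175: P 6/m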
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(175, 'P 6/m', transformations)
space_groups[175] = sg
space_groups['P 6/m'] = sg
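# No. 176: P 63/m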
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(176, 'P 63/m', transformations)
space_groups[176] = sg
space_groups['P 63/m'] = sg
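# No. 177: P 6 2 2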
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(177, 'P 6 2 2', transformations)
space_groups[177] = sg
space_groups['P 6 2 2'] = sg
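# No. 178: P 61 2 2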
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(178, 'P 61 2 2', transformations)
space_groups[178] = sg
space_groups['P 61 2 2'] = sg
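# No. 179: P 65 2 2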
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(179, 'P 65 2 2', transformations)
space_groups[179] = sg
space_groups['P 65 2 2'] = sg
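# Space group 180: P 62 2 2 (12 operations; 6_2 screw axis).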
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(180, 'P 62 2 2', transformations)
space_groups[180] = sg
space_groups['P 62 2 2'] = sg
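# Space group 181: P 64 2 2 (12 operations; 6_4 screw axis).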
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(181, 'P 64 2 2', transformations)
space_groups[181] = sg
space_groups['P 64 2 2'] = sg
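# Space group 182: P 63 2 2 (12 operations; 6_3 screw axis).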
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(182, 'P 63 2 2', transformations)
space_groups[182] = sg
space_groups['P 63 2 2'] = sg
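# Space group 183: P 6 m m (12 operations).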
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(183, 'P 6 m m', transformations)
space_groups[183] = sg
space_groups['P 6 m m'] = sg
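# Space group 184: P 6 c c (12 operations; the c-glides contribute
# translations of 1/2 along c).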
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(184, 'P 6 c c', transformations)
space_groups[184] = sg
space_groups['P 6 c c'] = sg
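# Space group 185: P 63 c m (12 operations).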
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(185, 'P 63 c m', transformations)
space_groups[185] = sg
space_groups['P 63 c m'] = sg
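# Space group 186: P 63 m c (12 operations).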
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(186, 'P 63 m c', transformations)
space_groups[186] = sg
space_groups['P 63 m c'] = sg
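# Space group 187: P -6 m 2 (12 operations).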
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(187, 'P -6 m 2', transformations)
space_groups[187] = sg
space_groups['P -6 m 2'] = sg
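# Space group 188: P -6 c 2 (12 operations).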
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(188, 'P -6 c 2', transformations)
space_groups[188] = sg
space_groups['P -6 c 2'] = sg
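# Space group 189: P -6 2 m (12 operations).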
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(189, 'P -6 2 m', transformations)
space_groups[189] = sg
space_groups['P -6 2 m'] = sg
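# Space group 190: P -6 2 c (12 operations).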
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(190, 'P -6 2 c', transformations)
space_groups[190] = sg
space_groups['P -6 2 c'] = sg
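# Space group 191: P 6/m m m (24 operations; the hexagonal holohedry).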
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(191, 'P 6/m m m', transformations)
space_groups[191] = sg
space_groups['P 6/m m m'] = sg
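# Space group 192: P 6/m c c (24 operations).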
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(192, 'P 6/m c c', transformations)
space_groups[192] = sg
space_groups['P 6/m c c'] = sg
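# Space group 193: P 63/m c m (24 operations).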
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(193, 'P 63/m c m', transformations)
space_groups[193] = sg
space_groups['P 63/m c m'] = sg
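# Space group 194: P 63/m m c (24 operations).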
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(194, 'P 63/m m c', transformations)
space_groups[194] = sg
space_groups['P 63/m m c'] = sg
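# Space group 195: P 2 3 (12 operations). First of the cubic groups: the
# body-diagonal 3-fold axes appear as cyclic permutation matrices, e.g.
# [0,0,1,1,0,0,0,1,0] maps (x, y, z) -> (z, x, y).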
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(195, 'P 2 3', transformations)
space_groups[195] = sg
space_groups['P 2 3'] = sg
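
# Sanity sketch, kept as a comment so the module stays a pure data table
# (assumes NumPy-style semantics for N): the 12 rotation parts collected
# above for P 2 3 form the tetrahedral rotation group T, so they are closed
# under matrix multiplication:
#
#     mats = [tuple(N.ravel(r)) for r, _, _ in transformations]
#     for a, _, _ in transformations:
#         for b, _, _ in transformations:
#             assert tuple(N.ravel(N.dot(a, b))) in mats

# Space group 196: F 2 3 (48 entries: the 12 point operations repeated with
# the face-centring translations (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0)).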
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(196, 'F 2 3', transformations)
space_groups[196] = sg
space_groups['F 2 3'] = sg
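# Space group 197: I 2 3 (24 entries: the 12 point operations plus their
# body-centred copies shifted by (1/2,1/2,1/2)).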
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(197, 'I 2 3', transformations)
space_groups[197] = sg
space_groups['I 2 3'] = sg
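# Space group 198: P 21 3 (12 operations; the 2_1 screw axes contribute the
# half-unit translations below).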
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(198, 'P 21 3', transformations)
space_groups[198] = sg
space_groups['P 21 3'] = sg
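# Space group 199, 'I 21 3': 24 operations (12 primitive, then 12 body-centred
# copies with translations shifted by (1/2,1/2,1/2); sums are left unreduced mod 1).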
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(199, 'I 21 3', transformations)
space_groups[199] = sg
space_groups['I 21 3'] = sg
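# Space group 200, 'P m -3': 24 operations (12 proper rotations followed by
# their inversions, all with zero translation).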
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(200, 'P m -3', transformations)
space_groups[200] = sg
space_groups['P m -3'] = sg
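# Space group 201, 'P n -3 :2': 24 operations (origin choice 2; the inverted
# operations carry negative translation components).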
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(201, 'P n -3 :2', transformations)
space_groups[201] = sg
space_groups['P n -3 :2'] = sg
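# Space group 202, 'F m -3': 96 operations (24 primitive plus face-centred
# copies shifted by (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0)).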
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(202, 'F m -3', transformations)
space_groups[202] = sg
space_groups['F m -3'] = sg
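# Space group 203, 'F d -3 :2': 96 operations (origin choice 2; 24 primitive
# plus the three face-centring shifts, as for 'F m -3').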
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(203, 'F d -3 :2', transformations)
space_groups[203] = sg
space_groups['F d -3 :2'] = sg
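# Space group 204, 'I m -3': 48 operations (24 primitive plus 24 body-centred
# copies shifted by (1/2,1/2,1/2)).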
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(204, 'I m -3', transformations)
space_groups[204] = sg
space_groups['I m -3'] = sg
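# Space group 205, 'P a -3': 24 operations.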
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(205, 'P a -3', transformations)
space_groups[205] = sg
space_groups['P a -3'] = sg
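
# Space group 206 ('I a -3'): 48 operations -- the 24 operations of
# point group m-3, each repeated with the body-centring translation
# (1/2, 1/2, 1/2) added.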
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(206, 'I a -3', transformations)
space_groups[206] = sg
space_groups['I a -3'] = sg
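
# Each (rot, trans_num, trans_den) triple in this module is a symmetry
# operation acting on fractional coordinates as
# x' = rot . x + trans_num / trans_den, conventionally wrapped back
# into the unit cell.  A minimal illustrative sketch, assuming N is the
# numpy-compatible array module imported at the top of this file; the
# helper name is hypothetical and not part of the module API:
def _apply_symmetry_op(rot, trans_num, trans_den, point):
    # Matrix part first, then the fractional translation; multiplying
    # by 1.0 forces float division on the integer arrays.
    return (N.dot(rot, point) + trans_num * 1.0 / trans_den) % 1.0

# Space group 207 ('P 4 3 2'): the 24 rotations of point group 432,
# all with zero translation.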
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(207, 'P 4 3 2', transformations)
space_groups[207] = sg
space_groups['P 4 3 2'] = sg
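
# Space group 208 ('P 42 3 2'): as 207, but the fourfold rotations and
# the face-diagonal twofolds become screw operations carrying the
# translation (1/2, 1/2, 1/2).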
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(208, 'P 42 3 2', transformations)
space_groups[208] = sg
space_groups['P 42 3 2'] = sg
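
# Space group 209 ('F 4 3 2'): 96 operations -- the 24 operations of
# 432 repeated over the four face-centring translations (0,0,0),
# (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0).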
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(209, 'F 4 3 2', transformations)
space_groups[209] = sg
space_groups['F 4 3 2'] = sg
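
# Space group 210 ('F 41 3 2'): 96 operations; the 41 screw components
# contribute quarter-cell translations such as (1/4, 1/4, 1/4),
# repeated over the same four F-centring translations as in 209.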
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(210, 'F 41 3 2', transformations)
space_groups[210] = sg
space_groups['F 41 3 2'] = sg
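
# A minimal usage sketch (hypothetical helper, not part of the module
# API): expand one fractional position into its orbit under a
# transformation list such as the ones built in this module.
def _equivalent_positions(transformations, point):
    # One image per symmetry operation; duplicates can occur when the
    # point sits on a special (high-symmetry) position.
    return [(N.dot(rot, point) + num * 1.0 / den) % 1.0
            for rot, num, den in transformations]

# Space group 211 ('I 4 3 2'): 48 operations -- the 24 operations of
# 432 repeated with the body-centring translation (1/2, 1/2, 1/2).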
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(211, 'I 4 3 2', transformations)
space_groups[211] = sg
space_groups['I 4 3 2'] = sg
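
# Space group 212 ('P 43 3 2'): 24 operations; the 43 screw components
# contribute three-quarter-cell translations such as (3/4, 3/4, 1/4).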
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(212, 'P 43 3 2', transformations)
space_groups[212] = sg
space_groups['P 43 3 2'] = sg
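# Space group 213 (P 41 3 2): 24 operations, the enantiomorph of P 43 3 2
# above (the quarter translations 1/4 and 3/4 are swapped between the two).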
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(213, 'P 41 3 2', transformations)
space_groups[213] = sg
space_groups['P 41 3 2'] = sg
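# Space group 214 (I 41 3 2): 48 operations -- 24 followed by the same 24
# shifted by the body-centering vector (1/2, 1/2, 1/2); the summed translations
# are stored unreduced (e.g. 5/4 rather than 1/4).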
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(214, 'I 41 3 2', transformations)
space_groups[214] = sg
space_groups['I 41 3 2'] = sg
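# Space group 215 (P -4 3 m): 24 operations, all with zero translation.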
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(215, 'P -4 3 m', transformations)
space_groups[215] = sg
space_groups['P -4 3 m'] = sg
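# Space group 216 (F -4 3 m): 96 operations -- 24 point operations repeated
# with the face-centering shifts (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0).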
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(216, 'F -4 3 m', transformations)
space_groups[216] = sg
space_groups['F -4 3 m'] = sg
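# Space group 217 (I -4 3 m): 48 operations -- 24 plus their body-centered
# (1/2,1/2,1/2) copies. Once registered, a group is reachable both by number
# and by Hermann-Mauguin symbol, e.g. space_groups[217] or
# space_groups['I -4 3 m'], as the paired assignments in this table show.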
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(217, 'I -4 3 m', transformations)
space_groups[217] = sg
space_groups['I -4 3 m'] = sg
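# Space group 218 (P -4 3 n): 24 operations; the improper (determinant -1)
# operations carry the (1/2,1/2,1/2) glide translation, the proper rotations
# none.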
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(218, 'P -4 3 n', transformations)
space_groups[218] = sg
space_groups['P -4 3 n'] = sg
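# Next group (presumably space group 219, F -4 3 c, given the file's ordering
# and the face-centering shifts in the entries below); its SpaceGroup
# registration lies past the end of this excerpt.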
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(219, 'F -4 3 c', transformations)
space_groups[219] = sg
space_groups['F -4 3 c'] = sg
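# --- Illustrative helper (not part of the generated table) ---
# Each entry appended to a ``transformations`` list is a triple
# (rot, trans_num, trans_den): a 3x3 integer rotation matrix and a
# fractional translation stored as an integer numerator/denominator pair.
# The affine action on fractional coordinates is therefore
# x' = rot . x + trans_num / trans_den.  The sketch below assumes only
# that N is the numeric module (Numeric/NumPy) imported at the top of
# this file; the helper name itself is hypothetical.
def apply_transformation(transformation, point):
    # Hedged sketch: apply one symmetry operation to a point given in
    # fractional coordinates and return the (possibly unreduced) image.
    rot, trans_num, trans_den = transformation
    translation = trans_num / (1.0 * trans_den)  # elementwise fraction
    return N.dot(rot, point) + translation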
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(220, 'I -4 3 d', transformations)
space_groups[220] = sg
space_groups['I -4 3 d'] = sg
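# Space group 220 (I -4 3 d) is now registered.  Every group is entered
# into space_groups twice, once under its International Tables number and
# once under its Hermann-Mauguin symbol, so either key resolves to the
# same SpaceGroup instance.  Minimal lookup sketch (illustrative only):
#
#     sg = space_groups[220]
#     assert sg is space_groups['I -4 3 d']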
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(221, 'P m -3 m', transformations)
space_groups[221] = sg
space_groups['P m -3 m'] = sg
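# Space group 221 (P m -3 m) is symmorphic: every one of the 48 operations
# above carries a zero translation, so the rotation matrices by themselves
# realise the point group m-3m of order 48.  A hedged sanity-check sketch
# (the helper is hypothetical, not part of the generated data):
def count_pure_rotations(ops):
    # Count operations whose fractional translation is exactly zero.
    return len([op for op in ops if sum(abs(op[1])) == 0])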
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(222, 'P n -3 n :2', transformations)
space_groups[222] = sg
space_groups['P n -3 n :2'] = sg
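# The ':2' suffix on 'P n -3 n :2' denotes origin choice 2 of the
# International Tables (origin at a centre of inversion).  Note that the
# generator does not reduce translations modulo the lattice, which is why
# negative components such as (-1/2, -1/2, 0) appear above.  A hedged
# normalisation sketch (hypothetical helper; assumes NumPy's modulo
# semantics, where the result takes the sign of the divisor):
def reduce_translation(trans_num, trans_den):
    # Fold a fractional translation into the half-open interval [0, 1).
    return (trans_num / (1.0 * trans_den)) % 1.0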
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(223, 'P m -3 n', transformations)
space_groups[223] = sg
space_groups['P m -3 n'] = sg
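# Space group 223 (P m -3 n) is nonsymmorphic: exactly half of its 48
# operations above carry the translation (1/2, 1/2, 1/2) or its negative
# (again left unreduced by the generator), while the other 24 are pure
# rotations.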
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(224, 'P n -3 m :2', transformations)
space_groups[224] = sg
space_groups['P n -3 m :2'] = sg
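# Hedged enumeration sketch (hypothetical helper, relying only on the
# dual-key convention used throughout this table): iterate each group
# exactly once via its integer key.
def unique_space_groups(table):
    # Yield every SpaceGroup instance once, in International Tables order.
    for number in sorted(k for k in table if isinstance(k, int)):
        yield table[number]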
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
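# Repeat of the 48 operations with centring translation (1/2, 0, 1/2).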
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
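# Repeat of the 48 operations with centring translation (1/2, 1/2, 0).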
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
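# All four centring blocks are now collected (4 x 48 = 192 operations);
# register the group under both its International Tables number and its
# Hermann-Mauguin symbol.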
sg = SpaceGroup(225, 'F m -3 m', transformations)
space_groups[225] = sg
space_groups['F m -3 m'] = sg
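# Usage sketch (illustration only, not part of the generated table): each
# entry of `transformations` is a (rot, trans_num, trans_den) triple encoding
# the map x -> rot.x + trans_num/trans_den in fractional coordinates, and each
# group is registered under both keys, so for example:
#
#     sg = space_groups['F m -3 m']        # same object as space_groups[225]
#     rot, num, den = transformations[0]   # first symmetry operation
#     pos = N.array([0.1, 0.2, 0.3])
#     image = N.dot(rot, pos) + num.astype(float) / den

# Space group 226 ('F m -3 c'): the same 48 rotation parts as above, here
# paired with nonzero translation parts (the c-glide), again followed by the
# three face-centring blocks.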
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
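# Operations of space group 226 repeated with centring translation (0, 1/2, 1/2).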
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
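# Operations of space group 226 repeated with centring translation (1/2, 0, 1/2).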
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
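# Operations of space group 226 repeated with centring translation (1/2, 1/2, 0).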
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(226, 'F m -3 c', transformations)
space_groups[226] = sg
space_groups['F m -3 c'] = sg
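# Next space group in the sequence (227; presumably 'F d -3 m', given the
# quarter-cell translations below): base operations first.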
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(227, 'F d -3 m :2', transformations)
space_groups[227] = sg
space_groups['F d -3 m :2'] = sg
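# Hedged aside (not part of the generated table): each transformation triple
# (rot, trans_num, trans_den) appears to encode the affine symmetry operation
# x' = rot . x + trans_num / trans_den on fractional coordinates, with the
# translation kept as exact integer fractions. A minimal sketch of applying
# one operation, assuming `N` is this module's numeric-array alias:
#
#     rot, trans_num, trans_den = transformations[1]
#     x = N.array([0.1, 0.2, 0.3])
#     x_new = N.dot(rot, x) + 1.0 * trans_num / trans_den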
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-3])
############################################################################
# Copyright ESIEE Paris (2018) #
# #
# Contributor(s) : <NAME> #
# #
# Distributed under the terms of the CECILL-B License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
############################################################################
import unittest
import numpy as np
import higra as hg
def dendrogram_purity_naif(tree, leaf_labels):
from itertools import combinations
tree.lowest_common_ancestor_preprocess()
area = hg.attribute_area(tree)
max_label = np.max(leaf_labels)
label_histo = np.zeros((tree.num_leaves(), max_label + 1), dtype=np.int64)
label_histo[np.arange(tree.num_leaves()), leaf_labels] = 1
label_histo = hg.accumulate_sequential(tree, label_histo, hg.Accumulators.sum)
class_purity = label_histo / area[:, None]
count = 0
total = 0
for label in set(leaf_labels):
same = leaf_labels == label
same_indices, = same.nonzero()
if len(same_indices) < 2:
continue
pairs = list(combinations(same_indices, 2))
count += len(pairs)
pairs = np.asarray(pairs, dtype=np.int64)
lcas = tree.lowest_common_ancestor(pairs[:, 0], pairs[:, 1])
total += np.sum(class_purity[lcas, label])
return total / count
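# Hedged summary of the naive computation above: dendrogram purity is the
# mean, over every pair of leaves sharing a label, of the fraction of leaves
# below the pair's lowest common ancestor that carry that same label:
#     purity = (1 / |P|) * sum over (i, j) in P of class_purity[lca(i, j), c]
# where P is the set of same-label leaf pairs and c is the shared label.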
class TestHierarchicalCost(unittest.TestCase):
def test_dendrogram_purity(self):
tree = hg.Tree((5, 5, 6, 7, 7, 6, 8, 8, 8))
labels = np.asarray((1, 1, 0, 1, 0), dtype=np.int32)
p = hg.dendrogram_purity(tree, labels)
self.assertTrue(p == 0.65)
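        # worked check of the 0.65 above: the label-1 pairs (0,1), (0,3) and
        # (1,3) have LCAs 5, 8 and 8 with purities 1.0, 0.6 and 0.6, and the
        # single label-0 pair (2,4) has LCA 8 with purity 0.4, giving
        # (1.0 + 0.6 + 0.6 + 0.4) / 4 = 0.65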
tree = hg.Tree((5, 5, 5, 6, 6, 7, 7, 7))
labels = np.asarray((1, 1, 0, 1, 0), dtype=np.int32)
p = hg.dendrogram_purity(tree, labels)
        self.assertTrue(np.allclose(p, 0.5666666666666667))
#
# OtterTune - async_tasks.py
#
# Copyright (c) 2017-18, Carnegie Mellon University Database Group
#
import random
import queue
import numpy as np
from celery.task import task, Task
from celery.utils.log import get_task_logger
from djcelery.models import TaskMeta
from sklearn.preprocessing import StandardScaler
from analysis.gp import GPRNP
from analysis.gp_tf import GPRGD
from analysis.preprocessing import Bin, DummyEncoder
from analysis.constraints import ParamConstraintHelper
from website.models import PipelineData, PipelineRun, Result, Workload, KnobCatalog, MetricCatalog
from website.parser import Parser
from website.types import PipelineTaskType
from website.utils import DataUtil, JSONUtil
from website.settings import IMPORTANT_KNOB_NUMBER, NUM_SAMPLES, TOP_NUM_CONFIG # pylint: disable=no-name-in-module
from website.settings import (DEFAULT_LENGTH_SCALE, DEFAULT_MAGNITUDE,
MAX_TRAIN_SIZE, BATCH_SIZE, NUM_THREADS,
DEFAULT_RIDGE, DEFAULT_LEARNING_RATE,
DEFAULT_EPSILON, MAX_ITER, GPR_EPS,
DEFAULT_SIGMA_MULTIPLIER, DEFAULT_MU_MULTIPLIER)
from website.settings import INIT_FLIP_PROB, FLIP_PROB_DECAY
from website.types import VarType
LOG = get_task_logger(__name__)
class UpdateTask(Task): # pylint: disable=abstract-method
def __init__(self):
self.rate_limit = '50/m'
self.max_retries = 3
self.default_retry_delay = 60
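        # these mirror the Celery Task options of the same names; setting
        # them in __init__ gives every Update* subclass one shared
        # rate-limit and retry policy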
class AggregateTargetResults(UpdateTask): # pylint: disable=abstract-method
def on_success(self, retval, task_id, args, kwargs):
super(AggregateTargetResults, self).on_success(retval, task_id, args, kwargs)
# Completely delete this result because it's huge and not
# interesting
task_meta = TaskMeta.objects.get(task_id=task_id)
task_meta.result = None
task_meta.save()
class MapWorkload(UpdateTask): # pylint: disable=abstract-method
def on_success(self, retval, task_id, args, kwargs):
super(MapWorkload, self).on_success(retval, task_id, args, kwargs)
# Replace result with formatted result
if not args[0]['bad']:
new_res = {
'scores': sorted(args[0]['scores'].items()),
'mapped_workload_id': args[0]['mapped_workload'],
}
task_meta = TaskMeta.objects.get(task_id=task_id)
task_meta.result = new_res # Only store scores
task_meta.save()
else:
task_meta = TaskMeta.objects.get(task_id=task_id)
task_meta.result = None
task_meta.save()
class ConfigurationRecommendation(UpdateTask): # pylint: disable=abstract-method
def on_success(self, retval, task_id, args, kwargs):
super(ConfigurationRecommendation, self).on_success(retval, task_id, args, kwargs)
result_id = args[0]['newest_result_id']
result = Result.objects.get(pk=result_id)
# Replace result with formatted result
formatted_params = Parser.format_dbms_knobs(result.dbms.pk, retval['recommendation'])
task_meta = TaskMeta.objects.get(task_id=task_id)
retval['recommendation'] = formatted_params
task_meta.result = retval
task_meta.save()
# Create next configuration to try
config = Parser.create_knob_configuration(result.dbms.pk, retval['recommendation'])
retval['recommendation'] = config
result.next_configuration = JSONUtil.dumps(retval)
result.save()
@task(base=AggregateTargetResults, name='aggregate_target_results')
def aggregate_target_results(result_id):
# Check that we've completed the background tasks at least once. We need
# this data in order to make a configuration recommendation (until we
# implement a sampling technique to generate new training data).
latest_pipeline_run = PipelineRun.objects.get_latest()
newest_result = Result.objects.get(pk=result_id)
if latest_pipeline_run is None or newest_result.session.tuning_session == 'randomly_generate':
result = Result.objects.filter(pk=result_id)
knobs_ = KnobCatalog.objects.filter(dbms=result[0].dbms, tunable=True)
knobs_catalog = {k.name: k for k in knobs_}
        knobs = dict(knobs_catalog)
# generate a config randomly
random_knob_result = gen_random_data(knobs)
agg_data = DataUtil.aggregate_data(result)
agg_data['newest_result_id'] = result_id
agg_data['bad'] = True
agg_data['config_recommend'] = random_knob_result
return agg_data
# Aggregate all knob config results tried by the target so far in this
# tuning session and this tuning workload.
target_results = Result.objects.filter(session=newest_result.session,
dbms=newest_result.dbms,
workload=newest_result.workload)
if len(target_results) == 0:
raise Exception('Cannot find any results for session_id={}, dbms_id={}'
.format(newest_result.session, newest_result.dbms))
agg_data = DataUtil.aggregate_data(target_results)
agg_data['newest_result_id'] = result_id
agg_data['bad'] = False
return agg_data
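# Note on the contract above: downstream tasks key off agg_data['bad']. When
# it is True, configuration_recommendation() below short-circuits and returns
# the random config stored in agg_data['config_recommend']; when False, the
# full workload-mapping + GPR pipeline runs instead.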
def gen_random_data(knobs):
random_knob_result = {}
for name, metadata in list(knobs.items()):
if metadata.vartype == VarType.BOOL:
            random_knob_result[name] = bool(random.randint(0, 1))
elif metadata.vartype == VarType.ENUM:
enumvals = metadata.enumvals.split(',')
enumvals_len = len(enumvals)
rand_idx = random.randint(0, enumvals_len - 1)
random_knob_result[name] = rand_idx
elif metadata.vartype == VarType.INTEGER:
random_knob_result[name] = random.randint(int(metadata.minval), int(metadata.maxval))
elif metadata.vartype == VarType.REAL:
random_knob_result[name] = random.uniform(
float(metadata.minval), float(metadata.maxval))
elif metadata.vartype == VarType.STRING:
random_knob_result[name] = "None"
elif metadata.vartype == VarType.TIMESTAMP:
random_knob_result[name] = "None"
else:
raise Exception(
'Unknown variable type: {}'.format(metadata.vartype))
return random_knob_result
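# Minimal usage sketch for gen_random_data (the knob object below is a
# hypothetical stand-in for a KnobCatalog row with the attributes read above):
#
#     fake_knob = type('K', (), {'vartype': VarType.INTEGER,
#                                'minval': '128', 'maxval': '4096'})()
#     config = gen_random_data({'shared_buffers': fake_knob})
#     # -> e.g. {'shared_buffers': 1732}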
@task(base=ConfigurationRecommendation, name='configuration_recommendation')
def configuration_recommendation(target_data):
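    # High-level flow of the steps below: load the mapped workload's
    # knob/metric data, restrict to the top-ranked knobs and the target
    # objective, de-duplicate rows, dummy-encode categorical knobs, scale
    # everything to N(0, 1), then fit a GPRGD model and gradient-descend from
    # random plus best-so-far starting points to pick the recommended config.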
LOG.info('configuration_recommendation called')
latest_pipeline_run = PipelineRun.objects.get_latest()
if target_data['bad'] is True:
target_data_res = {}
target_data_res['status'] = 'bad'
target_data_res['info'] = 'WARNING: no training data, the config is generated randomly'
target_data_res['recommendation'] = target_data['config_recommend']
return target_data_res
# Load mapped workload data
mapped_workload_id = target_data['mapped_workload'][0]
mapped_workload = Workload.objects.get(pk=mapped_workload_id)
workload_knob_data = PipelineData.objects.get(
pipeline_run=latest_pipeline_run,
workload=mapped_workload,
task_type=PipelineTaskType.KNOB_DATA)
workload_knob_data = JSONUtil.loads(workload_knob_data.data)
workload_metric_data = PipelineData.objects.get(
pipeline_run=latest_pipeline_run,
workload=mapped_workload,
task_type=PipelineTaskType.METRIC_DATA)
workload_metric_data = JSONUtil.loads(workload_metric_data.data)
X_workload = np.array(workload_knob_data['data'])
X_columnlabels = np.array(workload_knob_data['columnlabels'])
y_workload = np.array(workload_metric_data['data'])
y_columnlabels = np.array(workload_metric_data['columnlabels'])
rowlabels_workload = np.array(workload_metric_data['rowlabels'])
# Target workload data
newest_result = Result.objects.get(pk=target_data['newest_result_id'])
X_target = target_data['X_matrix']
y_target = target_data['y_matrix']
rowlabels_target = np.array(target_data['rowlabels'])
if not np.array_equal(X_columnlabels, target_data['X_columnlabels']):
raise Exception(('The workload and target data should have '
'identical X columnlabels (sorted knob names)'))
if not np.array_equal(y_columnlabels, target_data['y_columnlabels']):
raise Exception(('The workload and target data should have '
'identical y columnlabels (sorted metric names)'))
    # Filter Xs by the top-ranked knobs (IMPORTANT_KNOB_NUMBER of them)
ranked_knobs = PipelineData.objects.get(
pipeline_run=latest_pipeline_run,
workload=mapped_workload,
task_type=PipelineTaskType.RANKED_KNOBS)
ranked_knobs = JSONUtil.loads(ranked_knobs.data)[:IMPORTANT_KNOB_NUMBER]
ranked_knob_idxs = [i for i, cl in enumerate(X_columnlabels) if cl in ranked_knobs]
X_workload = X_workload[:, ranked_knob_idxs]
X_target = X_target[:, ranked_knob_idxs]
X_columnlabels = X_columnlabels[ranked_knob_idxs]
# Filter ys by current target objective metric
target_objective = newest_result.session.target_objective
target_obj_idx = [i for i, cl in enumerate(y_columnlabels) if cl == target_objective]
if len(target_obj_idx) == 0:
raise Exception(('Could not find target objective in metrics '
'(target_obj={})').format(target_objective))
elif len(target_obj_idx) > 1:
raise Exception(('Found {} instances of target objective in '
'metrics (target_obj={})').format(len(target_obj_idx),
target_objective))
metric_meta = MetricCatalog.objects.get_metric_meta(newest_result.session.dbms,
newest_result.session.target_objective)
    lessisbetter = metric_meta[target_objective].improvement == '(less is better)'
y_workload = y_workload[:, target_obj_idx]
y_target = y_target[:, target_obj_idx]
y_columnlabels = y_columnlabels[target_obj_idx]
# Combine duplicate rows in the target/workload data (separately)
X_workload, y_workload, rowlabels_workload = DataUtil.combine_duplicate_rows(
X_workload, y_workload, rowlabels_workload)
X_target, y_target, rowlabels_target = DataUtil.combine_duplicate_rows(
X_target, y_target, rowlabels_target)
    # Remove from the workload data any rows that also appear in the
    # target data
dups_filter = np.ones(X_workload.shape[0], dtype=bool)
target_row_tups = [tuple(row) for row in X_target]
for i, row in enumerate(X_workload):
if tuple(row) in target_row_tups:
dups_filter[i] = False
X_workload = X_workload[dups_filter, :]
y_workload = y_workload[dups_filter, :]
rowlabels_workload = rowlabels_workload[dups_filter]
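    # hedged aside: the `in` test above scans the list target_row_tups once
    # per workload row (O(n * m)); hashing the tuples first, e.g.
    #     target_row_set = set(target_row_tups)
    #     dups_filter = np.array([tuple(r) not in target_row_set
    #                             for r in X_workload])
    # would make each lookup O(1) without changing the result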
# Combine target & workload Xs for preprocessing
X_matrix = np.vstack([X_target, X_workload])
    # Dummy encode categorical variables
categorical_info = DataUtil.dummy_encoder_helper(X_columnlabels,
mapped_workload.dbms)
dummy_encoder = DummyEncoder(categorical_info['n_values'],
categorical_info['categorical_features'],
categorical_info['cat_columnlabels'],
categorical_info['noncat_columnlabels'])
X_matrix = dummy_encoder.fit_transform(X_matrix)
    # the two variables below are needed to correctly determine max/min on the dummies
binary_index_set = set(categorical_info['binary_vars'])
total_dummies = dummy_encoder.total_dummies()
# Scale to N(0, 1)
X_scaler = StandardScaler()
X_scaled = X_scaler.fit_transform(X_matrix)
if y_target.shape[0] < 5: # FIXME
# FIXME (dva): if there are fewer than 5 target results so far
# then scale the y values (metrics) using the workload's
# y_scaler. I'm not sure if 5 is the right cutoff.
y_target_scaler = None
y_workload_scaler = StandardScaler()
y_matrix = np.vstack([y_target, y_workload])
y_scaled = y_workload_scaler.fit_transform(y_matrix)
else:
# FIXME (dva): otherwise try to compute a separate y_scaler for
# the target and scale them separately.
try:
y_target_scaler = StandardScaler()
y_workload_scaler = StandardScaler()
y_target_scaled = y_target_scaler.fit_transform(y_target)
y_workload_scaled = y_workload_scaler.fit_transform(y_workload)
y_scaled = np.vstack([y_target_scaled, y_workload_scaled])
except ValueError:
y_target_scaler = None
y_workload_scaler = StandardScaler()
y_scaled = y_workload_scaler.fit_transform(y_target)
# Set up constraint helper
constraint_helper = ParamConstraintHelper(scaler=X_scaler,
encoder=dummy_encoder,
binary_vars=categorical_info['binary_vars'],
init_flip_prob=INIT_FLIP_PROB,
flip_prob_decay=FLIP_PROB_DECAY)
# FIXME (dva): check if these are good values for the ridge
# ridge = np.empty(X_scaled.shape[0])
# ridge[:X_target.shape[0]] = 0.01
# ridge[X_target.shape[0]:] = 0.1
# FIXME: we should generate more samples and use a smarter sampling
# technique
num_samples = NUM_SAMPLES
X_samples = np.empty((num_samples, X_scaled.shape[1]))
X_min = np.empty(X_scaled.shape[1])
X_max = np.empty(X_scaled.shape[1])
knobs_mem = KnobCatalog.objects.filter(
dbms=newest_result.session.dbms, tunable=True, resource=1)
knobs_mem_catalog = {k.name: k for k in knobs_mem}
mem_max = newest_result.workload.hardware.memory
X_mem = np.zeros([1, X_scaled.shape[1]])
X_default = np.empty(X_scaled.shape[1])
# Get default knob values
for i, k_name in enumerate(X_columnlabels):
k = KnobCatalog.objects.filter(dbms=newest_result.session.dbms, name=k_name)[0]
X_default[i] = k.default
X_default_scaled = X_scaler.transform(X_default.reshape(1, X_default.shape[0]))[0]
# Determine min/max for knob values
for i in range(X_scaled.shape[1]):
if i < total_dummies or i in binary_index_set:
col_min = 0
col_max = 1
else:
col_min = X_scaled[:, i].min()
col_max = X_scaled[:, i].max()
if X_columnlabels[i] in knobs_mem_catalog:
X_mem[0][i] = mem_max * 1024 * 1024 * 1024 # mem_max GB
col_max = min(col_max, X_scaler.transform(X_mem)[0][i])
            # Set the min value to the default value
            # FIXME: support multiple methods that can be selected by users
col_min = X_default_scaled[i]
X_min[i] = col_min
X_max[i] = col_max
X_samples[:, i] = np.random.rand(num_samples) * (col_max - col_min) + col_min
    # Maximize the throughput (more is better):
    # use gradient descent to minimize -throughput
if not lessisbetter:
y_scaled = -y_scaled
q = queue.PriorityQueue()
for x in range(0, y_scaled.shape[0]):
q.put((y_scaled[x][0], x))
i = 0
while i < TOP_NUM_CONFIG:
try:
item = q.get_nowait()
            # TensorFlow breaks if we use the training data points as
            # starting points for GPRGD, so we add a small bias (GPR_EPS,
            # 0.001 by default) to each starting point. If a starting point
            # is at X_max, we subtract the bias instead so that it stays
            # within the valid range.
dist = sum(np.square(X_max - X_scaled[item[1]]))
if dist < 0.001:
X_samples = np.vstack((X_samples, X_scaled[item[1]] - abs(GPR_EPS)))
else:
X_samples = np.vstack((X_samples, X_scaled[item[1]] + abs(GPR_EPS)))
i = i + 1
except queue.Empty:
break
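    # e.g. with the default GPR_EPS = 0.001, a best-so-far point x* is seeded
    # as x* + 0.001, or as x* - 0.001 when x* is almost on the X_max boundary,
    # so no GPRGD starting point coincides exactly with a training point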
model = GPRGD(length_scale=DEFAULT_LENGTH_SCALE,
magnitude=DEFAULT_MAGNITUDE,
max_train_size=MAX_TRAIN_SIZE,
batch_size=BATCH_SIZE,
num_threads=NUM_THREADS,
learning_rate=DEFAULT_LEARNING_RATE,
epsilon=DEFAULT_EPSILON,
max_iter=MAX_ITER,
sigma_multiplier=DEFAULT_SIGMA_MULTIPLIER,
mu_multiplier=DEFAULT_MU_MULTIPLIER)
model.fit(X_scaled, y_scaled, X_min, X_max, ridge=DEFAULT_RIDGE)
res = model.predict(X_samples, constraint_helper=constraint_helper)
best_config_idx = np.argmin(res.minl.ravel())
best_config = res.minl_conf[best_config_idx, :]
best_config = X_scaler.inverse_transform(best_config)
# Decode one-hot encoding into categorical knobs
best_config = dummy_encoder.inverse_transform(best_config)
    # Although we have max/min limits in the GPRGD training session, it may
    # lose some precision: e.g. 0.99...99 >= 1.0 may be True on the scaled
    # data, and once the scaled data is inverse-transformed the difference
    # becomes much larger and can no longer be ignored. Here we check the
    # range on the original data directly and make sure the recommended
    # config lies within it.
X_min_inv = X_scaler.inverse_transform(X_min)
X_max_inv = X_scaler.inverse_transform(X_max)
best_config = np.minimum(best_config, X_max_inv)
best_config = np.maximum(best_config, X_min_inv)
conf_map = {k: best_config[i] for i, k in enumerate(X_columnlabels)}
conf_map_res = {}
conf_map_res['status'] = 'good'
conf_map_res['recommendation'] = conf_map
conf_map_res['info'] = 'INFO: training data size is {}'.format(X_scaled.shape[0])
return conf_map_res
def load_data_helper(filtered_pipeline_data, workload, task_type):
pipeline_data = filtered_pipeline_data.get(workload=workload,
task_type=task_type)
LOG.debug("PIPELINE DATA: %s", str(pipeline_data.data))
return JSONUtil.loads(pipeline_data.data)
@task(base=MapWorkload, name='map_workload')
def map_workload(target_data):
# Get the latest version of pipeline data that's been computed so far.
latest_pipeline_run = PipelineRun.objects.get_latest()
if target_data['bad']:
assert target_data is not None
return target_data
assert latest_pipeline_run is not None
newest_result = Result.objects.get(pk=target_data['newest_result_id'])
target_workload = newest_result.workload
X_columnlabels = np.array(target_data['X_columnlabels'])
    y_columnlabels = np.array(target_data['y_columnlabels'])
# module import
import gc
import os
import copy
import random
import platform
import numpy as np
import pickle as p
import pandas as pd
import multiprocessing as mp
from numpy.matlib import repmat
# scipy module imports
from scipy.stats import norm, linregress
from scipy.spatial.distance import *
from scipy.interpolate import PchipInterpolator as pchip
from scipy.interpolate import InterpolatedUnivariateSpline as IUS
from scipy.interpolate import interp1d
from scipy.optimize import curve_fit
from scipy.signal import periodogram, hamming, boxcar, find_peaks
# sklearn module imports
from sklearn.linear_model import LinearRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
# statsmodels module imports
from statsmodels.nonparametric.smoothers_lowess import lowess
# pyqt5 module import
from PyQt5.QtCore import QThread, pyqtSignal
# custom module imports
import analysis_guis.common_func as cf
import analysis_guis.calc_functions as cfcn
import analysis_guis.rotational_analysis as rot
from analysis_guis.dialogs.rotation_filter import RotationFilteredData
from analysis_guis.cluster_read import ClusterRead
from probez.spike_handling import spike_io
# other parameters
dcopy = copy.deepcopy
default_dir_file = os.path.join(os.getcwd(), 'default_dir.p')
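# helper lambdas defined below (descriptions inferred from the definitions):
#   interp_arr       - nearest-neighbour resampling of each row of y onto the
#                      normalised grid xi
#   cell_perm_ind    - sorted random subsample of n_cell indices drawn from
#                      n_cell_tot
#   set_sf_cell_perm - applies such a subsample to the cell axis of each
#                      spiking-frequency array
#   grp_expt_indices - groups row indices by their (unique) experiment index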
interp_arr = lambda xi, y: np.vstack([interp1d(np.linspace(0, 1, len(x)), x, kind='nearest')(xi) for x in y])
cell_perm_ind = lambda n_cell_tot, n_cell: np.sort(np.random.permutation(n_cell_tot)[:n_cell])
set_sf_cell_perm = lambda spd_sf, n_pool, n_cell: [x[:, :, cell_perm_ind(n_pool, n_cell)] for x in spd_sf]
grp_expt_indices = lambda i_expt0: [np.where(i_expt0 == i)[0] for i in np.unique(i_expt0)]
# lambda function declarations
lin_func = lambda x, a: a * x
########################################################################################################################
########################################################################################################################
class WorkerThread(QThread):
    # creates the signal objects
work_started = pyqtSignal()
work_progress = pyqtSignal(str, float)
work_finished = pyqtSignal(object)
work_error = pyqtSignal(str, str)
work_plot = pyqtSignal(object)
def __init__(self, parent=None, main_gui=None):
# creates the worker object
super(WorkerThread, self).__init__(parent)
self.update_pbar = True
self.is_running = False
self.forced_quit = False
self.sub_job = None
self.is_ok = True
self.data = None
# other initialisations
self.main_gui = main_gui
self.thread_job_primary = None
self.thread_job_secondary = None
self.thread_job_para = None
def set_worker_func_type(self, thread_job_primary, thread_job_secondary=None, thread_job_para=None):
'''
        :param thread_job_primary: string giving the primary job type
        :param thread_job_secondary: string giving the secondary job type (optional)
        :param thread_job_para: list of parameters for the job (optional)
        :return:
'''
# updates the worker primary/secondary job type and parameters
self.thread_job_primary = thread_job_primary
self.thread_job_secondary = thread_job_secondary
self.thread_job_para = thread_job_para
def run(self):
'''
        Runs the thread job configured via set_worker_func_type.
        :return:
'''
# initialisations
w_prog, w_err = self.work_progress, self.work_error
        # updates the running/forced quit flags
self.is_running = True
self.forced_quit = False
self.is_ok = True
# updates the running parameter and enables the progress group parameters
self.work_started.emit()
# runs the job based on the type
thread_data = None
if self.thread_job_primary == 'init_data_file':
# case is initialising the data file
self.init_cluster_data()
elif self.thread_job_primary == 'init_pool_object':
# case is initialising the pool worker object
thread_data = self.init_pool_worker()
##################################
#### DATA I/O FUNCTIONS ####
##################################
elif self.thread_job_primary == 'load_data_files':
# case is loading the data files
thread_data = self.load_data_file()
elif self.thread_job_primary == 'save_multi_expt_file':
# retrieves the parameters
data, out_info = self.thread_job_para[0], self.thread_job_para[1]
# case is loading the data files
thread_data = self.save_multi_expt_file(data, out_info)
elif self.thread_job_primary == 'save_multi_comp_file':
# retrieves the parameters
data, out_info = self.thread_job_para[0], self.thread_job_para[1]
# case is loading the data files
thread_data = self.save_multi_comp_file(data, out_info)
elif self.thread_job_primary == 'run_calc_func':
# case is the calculation functions
calc_para, plot_para = self.thread_job_para[0], self.thread_job_para[1]
data, pool, g_para = self.thread_job_para[2], self.thread_job_para[3], self.thread_job_para[4]
################################################
#### CLUSTER CLASSIFICATION FUNCTIONS ####
################################################
if self.thread_job_secondary == 'Fixed/Free Cluster Matching':
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['clust'])
# case is determining the cluster matches
self.det_cluster_matches(data, calc_para, w_prog)
elif self.thread_job_secondary == 'Cluster Cross-Correlogram':
# case is the cc-gram type determinations
thread_data = self.calc_ccgram_types(calc_para, data.cluster)
######################################
#### AHV ANALYSIS FUNCTIONS ####
######################################
elif ' (Fixed)' in self.thread_job_secondary or \
(self.thread_job_secondary == 'Correlation Significance Overlap'):
# ensures the smoothing window is an odd integer (if smoothing)
if calc_para['is_smooth']:
if calc_para['n_smooth'] % 2 != 1:
# if not, then output an error message to screen
e_str = 'The median smoothing filter window span must be an odd integer.'
w_err.emit(e_str, 'Incorrect Smoothing Window Span')
# sets the error flag and exits the function
self.is_ok = False
self.work_finished.emit(thread_data)
return
# initialises the rotation filter class object (if not already set)
if plot_para['rot_filt'] is None:
plot_para['rot_filt'] = cf.init_rotation_filter_data(False)
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['vel', 'vel_sf_fix'], other_para=False)
# calculates the shuffled kinematic spiking frequencies
cfcn.calc_binned_kinemetic_spike_freq(data, plot_para, dcopy(calc_para), w_prog, roc_calc=False)
cfcn.calc_shuffled_kinematic_spike_freq(data, dcopy(calc_para), w_prog)
# runs any specific additional function
fit_func = ['Correlation Comparison (Fixed)',
'Correlation Fit Parameters (Fixed)',
'Individual Cell Correlation (Fixed)']
if self.thread_job_secondary in fit_func:
# case is the correlation fit parameters
self.calc_corr_fit_para(data, plot_para, dcopy(calc_para), w_prog)
elif (' (Freely Moving)' in self.thread_job_secondary):
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['vel_sf_free'], other_para=False)
# updates the bin velocity
data.rotation.vel_bin_corr = calc_para['vel_bin']
elif 'Fixed/Free Spiking Correlation' in self.thread_job_secondary:
# determines if the freely moving data file has been loaded
if not hasattr(data.externd, 'free_data'):
# if the data-file has not been loaded then output an error to screen and exit
e_str = 'The freely moving spiking frequency/statistics data file must be loaded ' \
'before being able to run this function.\n\nPlease load this data file and try again.'
w_err.emit(e_str, 'Freely Moving Data Missing?')
# exits the function with an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['ff_corr', 'vel'], other_para=False)
# calculates the shuffled kinematic spiking frequencies
cfcn.calc_binned_kinemetic_spike_freq(data, plot_para, calc_para, w_prog, roc_calc=False, use_raw=True)
# calculates the fixed/free correlations (if not already set)
if not data.comp.ff_corr.is_set:
self.calc_fix_free_correlation(data, calc_para, w_prog)
################################################
#### FREELY MOVING ANALYSIS FUNCTIONS ####
################################################
elif self.thread_job_secondary == 'Freely Moving Cell Fit Residual':
# calculates the freely moving cell fit residuals
self.calc_cell_fit_residual(data, calc_para, w_prog)
######################################
#### EYE TRACKING FUNCTIONS ####
######################################
elif self.thread_job_secondary in ['Eye Movement Event Signals']:
# check to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['eye_track'])
# calculates the eye-tracking metrics (if not calculated)
if len(data.externd.eye_track.t_evnt) == 0:
self.calc_eye_track_metrics(data, calc_para, w_prog)
elif 'Eye Movement Correlation' in self.thread_job_secondary:
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['eye_track'])
# calculates the eye-tracking metrics (if not calculated)
if len(data.externd.eye_track.t_evnt) == 0:
self.calc_eye_track_metrics(data, calc_para, w_prog)
# calculates the eye-tracking metrics
if len(data.externd.eye_track.t_sp_h) == 0:
self.calc_eye_track_corr(data, calc_para, w_prog)
######################################
#### ROC ANALYSIS FUNCTIONS ####
######################################
elif self.thread_job_secondary == 'Direction ROC Curves (Single Cell)':
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['condition'])
# case is the shuffled cluster distances
if not self.calc_cond_roc_curves(data, pool, calc_para, plot_para, g_para, False, 100.):
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif self.thread_job_secondary == 'Direction ROC Curves (Whole Experiment)':
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['condition', 'phase'])
# calculates the phase roc-curves for each cell
if not self.calc_cond_roc_curves(data, pool, calc_para, plot_para, g_para, False, 33.):
self.is_ok = False
self.work_finished.emit(thread_data)
return
# calculates the phase roc curve/significance values
self.calc_phase_roc_curves(data, calc_para, 66.)
self.calc_phase_roc_significance(calc_para, g_para, data, pool, 100.)
elif self.thread_job_secondary in ['Direction ROC AUC Histograms',
'Direction ROC Spiking Rate Heatmap']:
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['condition'])
# calculates the phase roc-curves for each cell
if not self.calc_cond_roc_curves(data, pool, calc_para, plot_para, g_para, True, 100., True):
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif 'Velocity ROC Curves' in self.thread_job_secondary:
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['vel'], other_para=True)
# calculates the binned kinematic spike frequencies
cfcn.calc_binned_kinemetic_spike_freq(data, plot_para, calc_para, w_prog)
self.calc_kinematic_roc_curves(data, pool, calc_para, g_para, 50.)
elif self.thread_job_secondary == 'Velocity ROC Significance':
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['vel'], other_para=True)
# calculates the binned kinematic spike frequencies
cfcn.calc_binned_kinemetic_spike_freq(data, plot_para, calc_para, w_prog)
# calculates the kinematic roc curves and their significance
self.calc_kinematic_roc_curves(data, pool, calc_para, g_para, 0.)
self.calc_kinematic_roc_significance(data, calc_para, g_para)
elif self.thread_job_secondary == 'Condition ROC Curve Comparison':
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['phase'])
# calculates the phase roc-curves for each cell
if not self.calc_cond_roc_curves(data, pool, calc_para, plot_para, g_para, True, 33.):
self.is_ok = False
self.work_finished.emit(thread_data)
return
# calculates the phase roc curve/significance values
self.calc_phase_roc_curves(data, calc_para, 66.)
self.calc_phase_roc_significance(calc_para, g_para, data, pool, 100.)
elif self.thread_job_secondary == 'Direction ROC Significance':
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['condition', 'phase'])
# calculates the phase roc-curves for each cell
if not self.calc_cond_roc_curves(data, pool, calc_para, plot_para, g_para, True, 33.,
force_black_calc=True):
self.is_ok = False
self.work_finished.emit(thread_data)
return
# calculates the phase roc curve/significance values
self.calc_phase_roc_curves(data, calc_para, 66.)
self.calc_phase_roc_significance(calc_para, g_para, data, pool, 100.)
if cf.det_valid_vis_expt(data, True):
if not self.calc_dirsel_group_types(data, pool, calc_para, plot_para, g_para):
self.is_ok = False
self.work_finished.emit(thread_data)
return
###############################################
#### COMBINED ANALYSIS LDA FUNCTIONS ####
###############################################
elif self.thread_job_secondary == 'Rotation/Visual Stimuli Response Statistics':
# calculates the phase roc curve/significance values
self.calc_phase_roc_curves(data, calc_para, 50.)
# calculates the direction/selection group types
if not self.calc_dirsel_group_types(data, pool, calc_para, plot_para, g_para):
self.is_ok = False
self.work_finished.emit(thread_data)
elif self.thread_job_secondary == 'Combined Direction ROC Curves (Whole Experiment)':
# checks that the conditions are correct for running the function
if not self.check_combined_conditions(calc_para, plot_para):
self.is_ok = False
self.work_finished.emit(thread_data)
return
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['condition', 'phase', 'visual'])
# initialises the rotational filter (if not initialised already)
if plot_para['rot_filt'] is None:
plot_para['rot_filt'] = cf.init_rotation_filter_data(False)
# adds motordrifting (if it is the visual expt type)
_plot_para, _calc_para = dcopy(plot_para), dcopy(calc_para)
if calc_para['vis_expt_type'] == 'MotorDrifting':
_plot_para['rot_filt']['t_type'].append('MotorDrifting')
# resets the flags to use the full rotation/visual phases
_calc_para['use_full_rot'], _calc_para['use_full_vis'] = True, True
# calculates the phase roc-curves for each cell
if not self.calc_cond_roc_curves(data, pool, _calc_para, _plot_para, g_para, False, 33.):
self.is_ok = False
self.work_finished.emit(thread_data)
return
# calculates the phase roc curve/significance values
self.calc_phase_roc_curves(data, _calc_para, 66.)
if (calc_para['vis_expt_type'] == 'UniformDrifting') and \
(calc_para['grp_stype'] != 'Wilcoxon Paired Test'):
# sets up the visual rotation filter
r_filt_v = cf.init_rotation_filter_data(False)
r_filt_v['t_type'], r_filt_v['is_ud'], r_filt_v['t_cycle'] = ['UniformDrifting'], [True], ['15']
# retrieves the visual filter object
plot_exp_name, plot_all_expt = plot_para['plot_exp_name'], plot_para['plot_all_expt']
r_obj_vis, ind_type = cf.split_unidrift_phases(data, r_filt_v, None, plot_exp_name, plot_all_expt,
'Whole Experiment', 2.)
# calculates the full uniform-drifting curves
self.calc_ud_roc_curves(data, r_obj_vis, ind_type, 66.)
# calculates the direction selection types
if not self.calc_dirsel_group_types(data, pool, _calc_para, _plot_para, g_para):
self.is_ok = False
# calculates the partial roc curves
self.calc_partial_roc_curves(data, calc_para, plot_para, 66.)
elif self.thread_job_secondary in ['Normalised Kinematic Spiking Frequency']:
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['vel'], other_para=False)
# calculates the binned kinematic spike frequencies
cfcn.calc_binned_kinemetic_spike_freq(data, plot_para, calc_para, w_prog, roc_calc=False)
######################################################
#### DEPTH-BASED SPIKING ANALYSIS FUNCTIONS ####
######################################################
elif self.thread_job_secondary == 'Depth Spiking Rate Comparison':
# make a copy of the plotting/calculation parameters
_plot_para, _calc_para, r_data = dcopy(plot_para), dcopy(calc_para), data.depth
_plot_para['plot_exp_name'] = None
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['condition', 'phase', 'visual'])
# reduces the data clusters to only include the RSPd/RSPg cells
_data = cfcn.get_rsp_reduced_clusters(data)
# calculates the phase roc-curves for each cell
if not self.calc_cond_roc_curves(_data, pool, _calc_para, _plot_para, g_para, True,
33., r_data=r_data, force_black_calc=True):
self.is_ok = False
self.work_finished.emit(thread_data)
return
# calculates the phase roc curve/significance values
self.calc_phase_roc_curves(_data, _calc_para, 66., r_data=r_data)
############################################
#### SPIKING FREQUENCY CALCULATION ####
############################################
# initialisations
r_filt = _plot_para['rot_filt']
r_data.ch_depth, r_data.ch_region, r_data.ch_layer = \
cfcn.get_channel_depths_tt(_data._cluster, r_filt['t_type'])
t_ofs, t_phase = cfcn.get_rot_phase_offsets(calc_para)
# rotation filtered object calculation
r_obj_rot = RotationFilteredData(_data, r_filt, None, None, True, 'Whole Experiment', False,
t_ofs=t_ofs, t_phase=t_phase)
# calculates the individual trial/mean spiking rates and sets up the plot/stats arrays
sp_f0_rot, sp_f_rot = cf.calc_phase_spike_freq(r_obj_rot)
s_plt, _, sf_stats, ind = cf.setup_spike_freq_plot_arrays(r_obj_rot, sp_f0_rot, sp_f_rot, None, 3)
r_data.plt, r_data.stats, r_data.ind, r_data.r_filt = s_plt, sf_stats, ind, dcopy(r_filt)
elif self.thread_job_secondary == 'Depth Spiking Rate Comparison (Multi-Sensory)':
# checks that the conditions are correct for running the function
if not self.check_combined_conditions(calc_para, plot_para):
self.is_ok = False
self.work_finished.emit(thread_data)
return
else:
# otherwise, make a copy of the plotting/calculation parameters
_plot_para, _calc_para, r_data = dcopy(plot_para), dcopy(calc_para), data.depth
_plot_para['plot_exp_name'], r_filt = None, _plot_para['rot_filt']
t_ofs, t_phase = cfcn.get_rot_phase_offsets(calc_para)
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['condition', 'phase', 'visual'])
# adds motordrifting (if it is the visual expt type)
if calc_para['vis_expt_type'] == 'MotorDrifting':
_plot_para['rot_filt']['t_type'].append('MotorDrifting')
# reduces the data clusters to only include the RSPd/RSPg cells
_data = cfcn.get_rsp_reduced_clusters(data)
# calculates the phase roc-curves for each cell
if not self.calc_cond_roc_curves(_data, pool, _calc_para, _plot_para, g_para, False, 33., r_data=r_data):
self.is_ok = False
self.work_finished.emit(thread_data)
return
# calculates the phase roc curve/significance values
self.calc_phase_roc_curves(_data, _calc_para, 66., r_data=r_data)
if (calc_para['vis_expt_type'] == 'UniformDrifting'):
# sets up the visual rotation filter
r_filt_v = cf.init_rotation_filter_data(False)
r_filt_v['t_type'], r_filt_v['is_ud'], r_filt_v['t_cycle'] = ['UniformDrifting'], [True], ['15']
# retrieves the visual filter object
r_obj_vis, ind_type = cf.split_unidrift_phases(_data, r_filt_v, None, None, True,
'Whole Experiment', 2., t_phase, t_ofs)
# calculates the full uniform-drifting curves
self.calc_ud_roc_curves(_data, r_obj_vis, ind_type, 66., r_data=r_data)
# calculates the individual trial/mean spiking rates and sets up the plot/stats arrays
sp_f0, sp_f = cf.calc_phase_spike_freq(r_obj_vis)
s_plt, _, sf_stats, ind = cf.setup_spike_freq_plot_arrays(r_obj_vis, sp_f0, sp_f, ind_type, 2)
r_data.plt_vms, r_data.stats_vms, r_data.ind_vms = s_plt, sf_stats, ind
r_data.r_filt_vms = dcopy(r_filt_v)
else:
# resets the uniform drifting fields
r_data.plt_vms, r_data.stats_vms, r_data.ind_vms, r_data.r_filt_vms = None, None, None, None
############################################
#### SPIKING FREQUENCY CALCULATION ####
############################################
# rotation filtered object calculation
r_obj_rot = RotationFilteredData(_data, r_filt, None, None, True, 'Whole Experiment', False,
t_phase=t_phase, t_ofs=t_ofs)
r_data.ch_depth_ms, r_data.ch_region_ms, r_data.ch_layer_ms = \
cfcn.get_channel_depths_tt(_data._cluster, r_filt['t_type'])
# calculates the individual trial/mean spiking rates and sets up the plot/stats arrays
sp_f0_rot, sp_f_rot = cf.calc_phase_spike_freq(r_obj_rot)
s_plt, _, sf_stats, ind = cf.setup_spike_freq_plot_arrays(r_obj_rot, sp_f0_rot, sp_f_rot, None, 3)
r_data.plt_rms, r_data.stats_rms, r_data.ind_rms = s_plt, sf_stats, ind
r_data.r_filt_rms = dcopy(r_filt)
##########################################################
#### ROTATION DISCRIMINATION ANALYSIS FUNCTIONS ####
##########################################################
elif self.thread_job_secondary == 'Rotation Direction LDA':
# if the solver parameters have not been set, then initialise them
d_data = data.discrim.dir
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=d_data)
# sets up the lda values
r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, d_data,
w_prog, w_err=w_err)
if status == 0:
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif status == 2:
# if an update in the calculations is required, then run the rotation LDA analysis
if not cfcn.run_rot_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max,
d_data=d_data, w_prog=w_prog):
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif self.thread_job_secondary == 'Temporal Duration/Offset LDA':
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.temp)
# if the temporal data parameters have changed/have not been initialised, then calculate the values
if data.discrim.temp.lda is None:
# checks to see if any base LDA calculation parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.temp)
# sets up the important arrays for the LDA
r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, data.discrim.temp,
w_prog, w_err=w_err)
if status == 0:
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
# if an update in the calculations is required, then run the temporal LDA analysis
if status == 2:
if not self.run_temporal_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max):
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif self.thread_job_secondary == 'Individual LDA':
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.indiv)
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.dir)
# sets up the important arrays for the LDA
r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, data.discrim.dir,
w_prog, True, w_err=w_err)
if status == 0:
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif status == 2:
# if an update in the calculations is required, then run the rotation LDA analysis
if not cfcn.run_rot_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max,
d_data=data.discrim.dir, w_prog=w_prog):
self.is_ok = False
self.work_finished.emit(thread_data)
return
# if the individual data parameters have changed/have not been initialised, then calculate the values
if data.discrim.indiv.lda is None:
# runs the individual LDA
if not self.run_individual_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max):
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif self.thread_job_secondary == 'Shuffled LDA':
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.shuffle)
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.dir)
# sets up the important arrays for the LDA
r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, data.discrim.dir,
w_prog, True, w_err=w_err)
if status == 0:
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif status == 2:
# if an update in the calculations is required, then run the rotation LDA analysis
if not cfcn.run_rot_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max,
d_data=data.discrim.dir, w_prog=w_prog):
self.is_ok = False
self.work_finished.emit(thread_data)
return
# runs the shuffled LDA
if not self.run_shuffled_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max):
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif self.thread_job_secondary == 'Pooled Neuron LDA':
# resets the minimum cell count and checks if the pooled parameters have been altered
# calc_para['lda_para']['n_cell_min'] = calc_para['n_cell_min']
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.part)
# if the pooled data parameters have changed/have not been initialised, then calculate the values
if data.discrim.part.lda is None:
# checks to see if any base LDA calculation parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.dir)
# sets up the important arrays for the LDA
r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, data.discrim.dir,
w_prog, True, w_err=w_err)
if not calc_para['pool_expt']:
if status == 0:
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
# elif status == 2:
# # if an update in the calculations is required, then run the rotation LDA analysis
# if not cfcn.run_rot_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max,
# d_data=data.discrim.dir, w_prog=w_prog):
# self.is_ok = False
# self.work_finished.emit(thread_data)
# return
# runs the partial LDA
if not self.run_pooled_lda(pool, data, calc_para, r_filt, i_expt, i_cell, n_trial_max):
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif self.thread_job_secondary == 'Individual Cell Accuracy Filtered LDA':
# check to see if the individual LDA calculations have been performed
if data.discrim.indiv.lda is None:
# if the individual LDA has not been run, then output an error to screen
e_str = 'The Individual LDA must be run first before this analysis can be performed'
w_err.emit(e_str, 'Missing Individual LDA Data')
# sets the ok flag to false and exit the function
self.is_ok = False
self.work_finished.emit(thread_data)
return
# makes a copy of the calculation parameters (using the individual LDA trial conditions)
_calc_para = dcopy(calc_para)
_calc_para['comp_cond'] = dcopy(data.discrim.indiv.ttype)
#########################################
#### ROTATION LDA CALCULATIONS ####
#########################################
# sets the min/max accuracy values
_calc_para['lda_para']['y_acc_min'] = 0
_calc_para['lda_para']['y_acc_max'] = 100
# checks to see if any base LDA calculation parameters have been altered
self.check_altered_para(data, _calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.dir)
# sets up the important arrays for the LDA
r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, _calc_para, data.discrim.dir,
w_prog, True, w_err=w_err)
if status == 0:
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif status == 2:
# if an update in the calculations is required, then run the rotation LDA analysis
if not cfcn.run_rot_lda(data, _calc_para, r_filt, i_expt, i_cell, n_trial_max,
d_data=data.discrim.dir, w_prog=w_prog, pW=50.):
self.is_ok = False
self.work_finished.emit(thread_data)
return
#########################################
#### FILTERED LDA CALCULATIONS ####
#########################################
# sets the min/max accuracy values
_calc_para['lda_para']['y_acc_min'] = _calc_para['y_acc_min']
_calc_para['lda_para']['y_acc_max'] = _calc_para['y_acc_max']
# checks to see if any base LDA calculation parameters have been altered
self.check_altered_para(data, _calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.filt)
# sets up the important arrays for the LDA
r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, _calc_para, data.discrim.filt,
w_prog, True, w_err=w_err)
if status == 0:
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif status == 2:
# if an update in the calculations is required, then run the rotation LDA analysis
if not cfcn.run_rot_lda(data, _calc_para, r_filt, i_expt, i_cell, n_trial_max,
d_data=data.discrim.filt, w_prog=w_prog, pW=50., pW0=50.):
self.is_ok = False
self.work_finished.emit(thread_data)
return
else:
# otherwise, update the calculation parameters
data.discrim.filt.yaccmn = _calc_para['y_acc_min']
data.discrim.filt.yaccmx = _calc_para['y_acc_max']
elif self.thread_job_secondary == 'LDA Group Weightings':
# checks to see if the data class has changed parameters
d_data, w_prog = data.discrim.wght, self.work_progress
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=d_data)
# sets up the lda values
r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, d_data,
w_prog, w_err=w_err)
if status == 0:
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif status == 2:
# if an update in the calculations is required, then run the rotation LDA analysis
if not self.run_wght_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max):
self.is_ok = False
self.work_finished.emit(thread_data)
return
#######################################################
#### SPEED DISCRIMINATION ANALYSIS FUNCTIONS ####
#######################################################
elif self.thread_job_secondary == 'Speed LDA Accuracy':
# checks to see if any base LDA calculation parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.spdacc)
# if the speed accuracy data parameters have changed/have not been initialised, then calculate the values
if data.discrim.spdacc.lda is None:
# sets up the important arrays for the LDA
r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, data.discrim.spdacc,
w_prog, True, w_err=w_err)
if status == 0:
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif status == 2:
if not self.run_speed_lda_accuracy(data, calc_para, r_filt, i_expt, i_cell, n_trial_max, w_prog):
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif self.thread_job_secondary == 'Speed LDA Comparison (Individual Experiments)':
# checks to see if any base LDA calculation parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.spdc)
# if the speed comparison data parameters have changed/have not been initialised, then calculate the values
if data.discrim.spdc.lda is None:
# sets up the important arrays for the LDA
r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, data.discrim.spdc,
w_prog, True, w_err=w_err)
if status == 0:
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif status == 2:
# if an update in the calculations is required, then run the rotation LDA analysis
if not self.run_kinematic_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max, w_prog):
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif self.thread_job_secondary == 'Speed LDA Comparison (Pooled Experiments)':
# checks to see if any base LDA calculation parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.spdcp)
# if the pooled speed comparison data parameters have changed/have not been initialised, then calculate the values
if data.discrim.spdcp.lda is None:
# sets up the important arrays for the LDA
r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, data.discrim.spdcp,
w_prog, True, w_err=w_err)
if status == 0:
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
# elif status == 2:
# if an update in the calculations is required, then run the rotation LDA analysis
if not self.run_pooled_kinematic_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max,
w_prog):
self.is_ok = False
self.work_finished.emit(thread_data)
return
# # calculates the psychometric curves
# w_prog.emit('Calculating Psychometric Curves', 100.)
# cfcn.calc_all_psychometric_curves(data.discrim.spdcp, float(calc_para['vel_bin']), calc_para['use_all'])
elif self.thread_job_secondary == 'Velocity Direction Discrimination LDA':
# checks to see if any base LDA calculation parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.spddir)
# if the speed direction data parameters have changed/have not been initialised, then calculate the values
if data.discrim.spddir.lda is None:
# sets up the important arrays for the LDA
r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, data.discrim.spddir,
w_prog, True, w_err=w_err)
if status == 0:
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif status == 2:
if not self.run_speed_dir_lda_accuracy(data, calc_para, r_filt, i_expt, i_cell,
n_trial_max, w_prog):
self.is_ok = False
self.work_finished.emit(thread_data)
return
#######################################
#### MISCELLANEOUS FUNCTIONS ####
#######################################
elif self.thread_job_secondary == 'Velocity Multilinear Regression Dataframe Output':
# checks to see if any base spiking frequency dataframe parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['spikedf'], other_para=data.spikedf)
# checks to see if the overlap duration is less than the time bin size
if calc_para['t_over'] >= calc_para['bin_sz']:
# if not, then output an error to screen
e_str = 'Bin Overlap Duration must be less than the Time Bin Size.\n' \
'Reset these parameters before running this function.'
w_err.emit(e_str, 'Incorrect Function Parameters')
# exits the function with an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
# only continue if the spiking frequency dataframe has not been set up
if not data.spikedf.is_set:
self.setup_spiking_freq_dataframe(data, calc_para)
elif self.thread_job_secondary == 'Autocorrelogram Theta Index Calculations':
# case to see if any parameters have changed
self.check_altered_para(data, calc_para, plot_para, g_para, ['theta'], other_para=data.theta_index)
# only continue if the theta index dataframe has not been set up
if not data.theta_index.is_set:
self.calc_auto_ccgram_fft(data, calc_para)
###############################
#### OTHER FUNCTIONS ####
###############################
elif self.thread_job_secondary == 'Shuffled Cluster Distances':
# case is the shuffled cluster distances
thread_data = self.calc_shuffled_cluster_dist(calc_para, data.cluster)
elif self.thread_job_primary == 'update_plot':
pass
# emits the finished work signal
self.work_finished.emit(thread_data)
############################################
#### THREAD CALCULATION FUNCTIONS ####
############################################
def load_data_file(self):
'''
:return: the list of newly loaded experiment data dictionaries (None if cancelled, or if there is nothing new to load)
'''
# retrieves the job parameters
load_dlg, loaded_exp, is_multi = self.thread_job_para[0], self.thread_job_para[1], self.thread_job_para[2]
if all(x in loaded_exp for x in load_dlg.exp_name):
# if there are no new experiments to load, then exit the function
return None
else:
n_file = len(load_dlg.exp_files)
dpw, p_rlx, data = 1.0 / n_file, 0.05, []
_, f_extn = os.path.splitext(load_dlg.exp_files[0])
# loops through each of the selected data files
for i_file in range(n_file):
if not self.is_running:
# if the user cancelled, then exit
return None
else:
# updates the progress bar string
p_str, pw0 = 'Loading File {0} of {1}'.format(i_file+1, n_file), i_file / n_file
self.work_progress.emit(p_str, 100.0 * pw0)
# sets the experiment file and name
if load_dlg.exp_name[i_file] not in loaded_exp:
# loads the data from the data file
with open(load_dlg.exp_files[i_file], 'rb') as fp:
data_nw = p.load(fp)
# setting of other fields
if isinstance(data_nw, dict):
data_nw['expFile'] = load_dlg.exp_files[i_file]
# re-calculates the signal features (single experiment only)
if f_extn == '.cdata':
if np.shape(data_nw['sigFeat'])[1] == 5:
# memory allocation for the signal features
xi = np.array(range(data_nw['nPts']))
sFeat = np.zeros((data_nw['nC'], 2))
for i in range(data_nw['nC']):
# creates the piecewise-polynomial of the mean signal
pp, t_max = pchip(xi, data_nw['vMu'][:, i]), data_nw['sigFeat'][i, 2]
t_min = np.argmin(data_nw['vMu'][int(t_max):, i]) + t_max
v_max_2 = data_nw['vMu'][int(t_max), i] / 2.0
v_min = np.min(data_nw['vMu'][int(t_max):, i])
v_half = data_nw['vMu'][int(data_nw['sigFeat'][i, 1]), i] / 2.0
##################################################
#### POST-STIMULI SPIKE HALF-WIDTH TIME ####
##################################################
# determines the point/voltage of the maximum preceding the minimum
bnd_1 = [(data_nw['sigFeat'][i, 0], data_nw['sigFeat'][i, 1])]
bnd_2 = [(data_nw['sigFeat'][i, 1], data_nw['sigFeat'][i, 2])]
bnd_3 = [(data_nw['sigFeat'][i, 2], t_min)]
# determines the location of the half-width points
t_hw1_lo = cfcn.opt_time_to_y0((pp, v_half), bnd_1)
t_hw1_hi = cfcn.opt_time_to_y0((pp, v_half), bnd_2)
t_hw2_lo = cfcn.opt_time_to_y0((pp, v_max_2), bnd_2)
t_hw2_hi = cfcn.opt_time_to_y0((pp, v_max_2), bnd_3)
t_rlx = cfcn.opt_time_to_y0((pp, v_min + p_rlx * (v_max_2 - v_min)), bnd_3)
# determine if it is feasible to find the 2nd peak half-width point
if (t_hw2_hi is None) or (t_rlx is None):
# if not, then linearly extrapolate past the end point of the signal
xi2 = np.array(range(2*xi[-1]))
ppL = IUS(xi, data_nw['vMu'][:, i], k=1)
# determines the half-width/relaxation time from the extrapolated signal
bnd_4 = [(data_nw['sigFeat'][i, 2], xi2[-1])]
t_hw2_hi = cfcn.opt_time_to_y0((ppL, v_max_2), bnd_4)
t_rlx = cfcn.opt_time_to_y0((ppL, v_min + p_rlx * (v_max_2 - v_min)), bnd_4)
# calculates the new signal features
data_nw['sigFeat'][i, 3] = t_hw1_lo
data_nw['sigFeat'][i, 4] = t_hw1_hi
sFeat[i, 0] = t_hw2_hi - t_hw2_lo
sFeat[i, 1] = t_rlx - t_max
# concatenates the new signal feature data
data_nw['sigFeat'] = np.concatenate((data_nw['sigFeat'], sFeat), axis=1)
# sets the cell cluster include indices (if not already set)
if 'clInclude' not in data_nw['expInfo']:
data_nw['expInfo']['clInclude'] = np.ones(data_nw['nC'], dtype=bool)
# appends the new data dictionary to the overall data list
data.append(data_nw)
# appends the current filename to the data dictionary and returns the object
return data
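# A minimal sketch of the file loading pattern used above (assumes `p` is the
# pickle module alias used by this module; the file name is illustrative):
#
#   import pickle as p
#   with open('expt1.cdata', 'rb') as fp:
#       data_nw = p.load(fp)               # dict with 'vMu', 'sigFeat', 'expInfo', etc.
#   data_nw['expFile'] = 'expt1.cdata'     # tags the dictionary with its source file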
def save_multi_expt_file(self, data, out_info):
'''
:return:
'''
# updates the progressbar
self.work_progress.emit('Saving Data To File...', 50.0)
# sets the file extension (based on the data type)
if hasattr(data.comp, 'data'):
f_extn = 'mdata' if len(data.comp.data) == 0 else 'mcomp'
else:
f_extn = 'mdata'
# sets the output file name
out_file = os.path.join(out_info['inputDir'], '{0}.{1}'.format(out_info['dataName'], f_extn))
# outputs the data to file
with open(out_file, 'wb') as fw:
p.dump(data, fw)
# updates the progressbar
self.work_progress.emit('Data Save Complete!', 100.0)
def save_multi_comp_file(self, data, out_info):
'''
:return:
'''
# updates the progressbar
self.work_progress.emit('Saving Data To File...', 50.0)
# memory allocation
n_file = len(out_info['exptName'])
# sets the output file name
out_file = os.path.join(out_info['inputDir'], '{0}.mcomp'.format(out_info['dataName']))
# output data file
data_out = {
'data': np.empty((n_file, 2), dtype=object),
'c_data': np.empty(n_file, dtype=object),
'ff_corr': data.comp.ff_corr if hasattr(data.comp, 'ff_corr') else None,
'f_data': data.externd.free_data if hasattr(data.externd, 'free_data') else None
}
for i_file in range(n_file):
# retrieves the index of the data field corresponding to the current experiment
fix_file = out_info['exptName'][i_file].split('/')[0]
i_comp = cf.det_comp_dataset_index(data.comp.data, fix_file)
# creates the multi-experiment data file based on the type
data_out['c_data'][i_file] = data.comp.data[i_comp]
data_out['data'][i_file, 0], data_out['data'][i_file, 1] = \
cf.get_comp_datasets(data, c_data=data_out['c_data'][i_file], is_full=True)
# outputs the data to file
with open(out_file, 'wb') as fw:
p.dump(data_out, fw)
# updates the progressbar
self.work_progress.emit('Data Save Complete!', 100.0)
def init_pool_worker(self):
'''
:return:
'''
# creates the pool worker object
p = mp.Pool(int(np.floor(1.5 * mp.cpu_count())))
# returns the object
return p
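# A minimal sketch of the pool sizing used above (assumes `mp` is the
# multiprocessing module alias; the 1.5x oversubscription is a throughput
# heuristic for jobs that are not purely CPU-bound):
#
#   import multiprocessing as mp
#   import numpy as np
#   n_proc = int(np.floor(1.5 * mp.cpu_count()))   # e.g., 12 workers on an 8-core machine
#   pool = mp.Pool(n_proc)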
def init_cluster_data(self):
'''
:return:
'''
def map_cluster_depths():
'''
:return: the mapped cluster depths and the channel mapping data (None values if the mapping file is invalid)
'''
# retrieves the cluster depths from the spike I/O class object
cluster_depth = sp_io.get_cluster_depths(cluster_ids)
# sets the mapped cluster depths based on the file type
if (exp_info['dmapFile'] is None) or (len(exp_info['dmapFile']) == 0):
# no map is given so return the original depth values
return cluster_depth, None
else:
# otherwise, map the cluster depth values from the probe to actual values
data = np.array(pd.read_csv(exp_info['dmapFile']))
if np.size(data, axis=1) < 4:
# if the mapping file is not correct, then output an error to screen
e_str = 'Channel mapping file does not have the correct format.\n\n' \
'Re-select a valid file before attempting to initialise the combined data files.'
self.work_error.emit(e_str, 'Invalid Channel Mapping File')
# return none values indicating the error
return None, None
else:
# otherwise, return the mapped channel depths and the other mapping values
return np.array([data[data[:, 1] == x, 0][0] for x in cluster_depth]), data[:, :4]
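# The channel mapping csv file is assumed (from the indexing above) to have
# >= 4 columns: column 1 holds the raw depth value matched against each
# cluster depth, column 0 holds the mapped (actual) depth that is returned,
# and column 3 holds the probe y-coordinates used for the region/layer
# determination below, e.g.,
#
#   mapped_depth, raw_depth, <unused>, y_coord
#   20,           0,         ...,      20
#   40,           1,         ...,      40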
# retrieves the job parameters
exp_info, out_name, g_para = self.thread_job_para[0], self.thread_job_para[1], self.thread_job_para[2]
# sets the global parameters
n_hist = int(g_para['n_hist'])
n_spike = int(g_para['n_spike'])
cluster_ids = None
# retrieves the spike I/O data and sets the cluster IDs based on the cluster type
sp_io = spike_io.SpikeIo(exp_info['srcDir'], exp_info['traceFile'], int(exp_info['nChan']))
if exp_info['clusterType'] == 'Good':
# case is the good clusters
if hasattr(sp_io, 'good_cluster_ids'):
cluster_ids = sp_io.good_cluster_ids
elif exp_info['clusterType'] == 'MUA':
# case is the multi-unit clusters
if hasattr(sp_io, 'MUA_cluster_ids'):
cluster_ids = sp_io.MUA_cluster_ids
if cluster_ids is None:
e_str = 'Cluster group file is missing. Please re-run with the cluster-group file in the source data directory'
self.work_error.emit(e_str, 'Cluster Group File Missing!')
return
# retrieves the clusters spike data and channel depths
self.work_progress.emit('Reshaping Cluster Data...', 0.0)
clusters = [ClusterRead(sp_io, cid) for cid in cluster_ids]
# determines the channel depths mapping
depth, channel_map_data = map_cluster_depths()
if depth is None:
# if the file has an incorrect format, then exit the function
return
# determines if the mapping values were set correctly
if channel_map_data is not None:
# if so, then determine the region/recording layers
y_coords = channel_map_data[:, 3]
depthLo, depthHi = np.array(exp_info['depthLo']).astype(int), np.array(exp_info['depthHi']).astype(int)
indD = np.array([next((i for i in range(len(depthHi)) if x <= depthHi[i]), len(depthHi)-1) for x in y_coords])
chRegion = np.array(exp_info['regionName'])[indD][depth.astype(int)]
chLayer = np.array(exp_info['recordLayer'])[indD][depth.astype(int)]
else:
# otherwise, return N/A for the region/recording layers
chRegion, chLayer = ['N/A'] * len(clusters), ['N/A'] * len(clusters)
depthLo, depthHi = None, None
# sets the signal point-wise/ISI bin vectors
xi_pts_H = np.linspace(-200, 100, n_hist + 1)
xi_isi_H = np.linspace(0, 1000, n_hist + 1)
# creates the recording/experimental information sub-dictionaries
expInfo = {'name': exp_info['expName'], 'date': exp_info['expDate'], 'cond': exp_info['expCond'],
'type': exp_info['expType'], 'sex': exp_info['expSex'], 'age': exp_info['expAge'],
'probe': exp_info['expProbe'], 'lesion': exp_info['lesionType'], 'channel_map': channel_map_data,
'cluster_type': exp_info['clusterType'], 'other_info': exp_info['otherInfo'],
'record_state': exp_info['recordState'], 'record_coord': exp_info['recordCoord'],
'depth_lo': depthLo, 'depth_hi': depthHi}
# memory allocation
pW0, pW1, nFeat = 20.0, 60.0, 5
nC, nSample = len(clusters), np.size(sp_io.traces, axis=0)
sFreq, vGain = float(exp_info['sFreq']), float(exp_info['vGain'])
# sets the data file dictionary object
A = {
'vSpike': np.empty(nC, dtype=object), 'tSpike': np.empty(nC, dtype=object),
'vMu': None, 'vSD': None, 'ccGram': None, 'ccGramXi': None, 'sigFeat': np.zeros((nC, nFeat)),
'clustID': cluster_ids, 'expInfo': expInfo, 'chDepth': depth, 'chRegion': chRegion, 'chLayer': chLayer,
'sFreq': sFreq, 'nC': nC, 'nPts': None, 'tExp': nSample / sFreq, 'vGain': vGain,
'isiHist': np.empty(nC, dtype=object), 'isiHistX': xi_isi_H,
'ptsHist': np.empty(nC, dtype=object), 'ptsHistX': xi_pts_H,
'rotInfo': None,
}
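# (for reference: 'vSpike'/'tSpike' hold the per-cluster spike waveforms/times,
# 'vMu'/'vSD' the nPts x nC mean/SD waveforms, and 'ccGram' the nC x nC x nBin
# cross-correlogram array - all of which are filled in by the loops below)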
# sets up the rotation analysis data dictionary
A['rotInfo'] = rot.load_rot_analysis_data(A, exp_info, sp_io, w_prog=self.work_progress, pW0=pW0)
# sets up the sub-job flags
self.sub_job = np.zeros(nC, dtype=bool)
# retrieves the cluster data
for i, c in enumerate(clusters):
if not self.is_running:
# if the user cancelled, then exit the function
return
else:
# updates the main gui progressbar
pW = pW0 + pW1 * (i + 1) / nC
self.work_progress.emit('Processing Cluster {0} of {1}'.format(i + 1, nC), pW)
###################################################
#### DATA RETRIEVAL & MEMORY ALLOCATIONS ####
###################################################
# retrieves the spike voltage/timing
v_spike = c.channel_waveforms
t_spike = 1000.0 * sp_io.get_spike_times_in_cluster(cluster_ids[i]) / sFreq
# memory allocation (only for the first cluster)
if i == 0:
A['nPts'] = np.size(v_spike, axis=0)
A['vMu'] = np.zeros((A['nPts'], nC), dtype=float)
A['vSD'] = np.zeros((A['nPts'], nC), dtype=float)
xi = np.array(range(A['nPts']))
###############################################
#### MAIN METRIC CALCULATION/STORAGE ####
###############################################
# sets the values into the final array
A['vSpike'][i] = v_spike[:, :n_spike] * vGain
A['tSpike'][i] = t_spike[:np.size(v_spike, axis=1)]
# calculates the mean/standard deviation of the voltage spikes
A['vMu'][:, i] = np.mean(v_spike, axis=1) * vGain
A['vSD'][:, i] = np.std(v_spike, axis=1) * vGain
######################################
#### HISTOGRAM CALCULATIONS ####
######################################
# calculates the point-wise histograms
A['ptsHist'][i] = np.zeros((A['nPts'], n_hist), dtype=int)
for iPts in range(A['nPts']):
H = np.histogram(v_spike[iPts, :], bins=xi_pts_H)
A['ptsHist'][i][iPts, :] = H[0]
# calculates the ISI histograms
dT = np.diff(A['tSpike'][i])
dT = dT[dT <= xi_isi_H[-1]]
H_isi = np.histogram(dT, bins=xi_isi_H, range=(xi_isi_H[0], xi_isi_H[-1]))
A['isiHist'][i] = H_isi[0]
###########################################
#### SIGNAL FEATURE CALCULATIONS ####
###########################################
# creates the piecewise-polynomial of the mean signal
pp = pchip(xi, A['vMu'][:, i])
# determines the points/voltages of the maxima preceding/following the minimum
i_min = np.argmin(A['vMu'][:, i])
i_max1 = np.argmax(A['vMu'][:i_min, i])
i_max2 = np.argmax(A['vMu'][i_min:, i]) + i_min
# determines the location of the half-width points
v_half = (min(pp(i_max1), pp(i_max2)) + pp(i_min)) / 2.0
t_lo = cfcn.opt_time_to_y0((pp, v_half), [(i_max1, i_min)])
t_hi = cfcn.opt_time_to_y0((pp, v_half), [(i_min, i_max2)])
# sets the signal features into the final array
A['sigFeat'][i, :] = [i_max1, i_min, i_max2, t_lo, t_hi]
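# (i.e., v_half is the midpoint between the smaller of the two peaks and the
# trough, and t_lo/t_hi are the interpolated times where the mean signal
# crosses v_half on either side of the trough - their difference gives the
# spike half-width)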
# memory garbage collection
gc.collect()
######################################################
#### CLUSTER CROSS-CORRELOGRAM CALCULATIONS ####
######################################################
# memory allocation
win_size = 50
# calculates the cross-correlation between each signal from each cluster
for i_row in range(nC):
if not self.is_running:
# if the user cancelled, then exit the function
return
else:
# updates the main gui progressbar
pW = (pW0 + pW1) + (100.0 - (pW0 + pW1)) * (i_row + 1) / (nC + 1)
self.work_progress.emit('Calculating CC-Grams...', pW)
# calculates the cross-correlograms between each of the other clusters
for j_row in range(nC):
if (i_row == 0) and (j_row == 0):
# case is the first cluster so allocate memory and set the time bin array
ccGram, A['ccGramXi'] = cfcn.calc_ccgram(A['tSpike'][i_row], A['tSpike'][j_row], win_size)
A['ccGram'] = np.zeros((nC, nC, len(ccGram)))
A['ccGram'][i_row, j_row, :] = ccGram
else:
# otherwise, set the new values directly into the array
A['ccGram'][i_row, j_row, :], _ = cfcn.calc_ccgram(A['tSpike'][i_row], A['tSpike'][j_row], win_size)
#################################
#### FINAL DATA OUTPUT ####
#################################
# dumps the cluster data to file
self.work_progress.emit('Outputting Data To File...', 99.0)
cf.save_single_file(out_name, A)
##########################################
#### CLUSTER MATCHING FUNCTIONS ####
##########################################
def det_cluster_matches(self, data, calc_para, w_prog):
'''
:param data: the overall data class object
:param calc_para: the calculation parameter dictionary
:param w_prog: the progress bar update signal
:return:
'''
# retrieves the comparison dataset
i_comp = cf.det_comp_dataset_index(data.comp.data, calc_para['calc_comp'])
c_data, data.comp.last_comp = data.comp.data[i_comp], i_comp
# if there is no further calculation necessary, then exit the function
if c_data.is_set:
return
# updates the cluster matching parameters
c_data.is_set = True
c_data.d_max = calc_para['d_max']
c_data.r_max = calc_para['r_max']
c_data.sig_corr_min = calc_para['sig_corr_min']
c_data.isi_corr_min = calc_para['isi_corr_min']
c_data.sig_diff_max = calc_para['sig_diff_max']
c_data.sig_feat_min = calc_para['sig_feat_min']
c_data.w_sig_feat = calc_para['w_sig_feat']
c_data.w_sig_comp = calc_para['w_sig_comp']
c_data.w_isi = calc_para['w_isi']
# retrieves the fixed/free cluster dataframes
data_fix, data_free = cf.get_comp_datasets(data, c_data=c_data, is_full=True)
def det_overall_cluster_matches(is_feas, D):
'''
:param is_feas: boolean array flagging the feasible fixed/free cluster pairings
:param D: pair-wise distance array between the fixed/free mean signals
:return:
'''
# calculates the pair-wise SS distances between each the fixed/free mean signals
iDsort, n_rows = np.argsort(D.T, axis=None), np.size(D, axis=0)
# memory allocation
isFix = np.zeros(data_fix['nC'], dtype=bool)
isFree = np.zeros(data_free['nC'], dtype=bool)
i_match = -np.ones(data_fix['nC'], dtype=int)
# determines the overall unique cluster matches
for i in range(len(iDsort)):
# determines the indices of the next best match
iR, iC = cfcn.ind2sub(n_rows, iDsort[i])
if not (isFix[iR] or isFree[iC]) and is_feas[iR, iC]:
# if there is not already a match, then update the match arrays
i_match[iR] = iC
isFix[iR], isFree[iC] = True, True
if all(isFix) or all(isFree):
# if all matches are found, then exit the loop
break
# returns the final match array
return i_match
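# A minimal numpy sketch of the greedy matching above: visit the pair-wise
# distances in ascending order and accept a pair only if neither cluster is
# already matched (the distance values below are illustrative):
#
#   import numpy as np
#   D = np.array([[1.0, 5.0],
#                 [2.0, 0.5]])
#   order = np.argsort(D, axis=None)    # flat indices [3, 0, 2, 1]
#   # index 3 -> pair (1, 1) accepted (d = 0.5); index 0 -> pair (0, 0)
#   # accepted (d = 1.0); the remaining pairs re-use a matched row/column and
#   # are skipped, giving i_match = [0, 1]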
def det_cluster_matches_old(c_data, is_feas, d_depth):
'''
:param c_data: the comparison data class object
:param is_feas: boolean array flagging the feasible fixed/free cluster pairings
:param d_depth: pair-wise absolute fixed/free channel depth differences
:return:
'''
# parameters
z_max = 1.0
# calculates the inter-signal euclidean distances
DD = cdist(data_fix['vMu'].T, data_free['vMu'].T)
# determines the matches based on the signal euclidean distances
c_data.i_match_old = det_overall_cluster_matches(is_feas, DD)
# calculates the correlation coefficients between the best matching signals
for i in range(data_fix['nC']):
# calculation of the z-scores
i_match = c_data.i_match_old[i]
if i_match >= 0:
# z-score calculations
dW = data_fix['vMu'][:, i] - data_free['vMu'][:, i_match]
c_data.z_score[:, i] = np.divide(dW, data_fix['vSD'][:, i])
# calculates the correlation coefficient
CC = np.corrcoef(data_fix['vMu'][:, i], data_free['vMu'][:, i_match])
c_data.sig_corr_old[i] = CC[0, 1]
c_data.sig_diff_old[i] = DD[i, i_match]
c_data.d_depth_old[i] = d_depth[i, i_match]
# sets the acceptance flag. for a cluster to be accepted, the following must be true:
# * the maximum absolute z-score must be < z_max
# * the correlation coefficient between the fixed/free signals must be > sig_corr_min
c_data.is_accept_old[i] = np.max(np.abs(c_data.z_score[:, i])) < z_max and \
c_data.sig_corr_old[i] > c_data.sig_corr_min
else:
# sets NaN values for all the single value metrics
c_data.sig_corr_old[i] = np.nan
c_data.d_depth_old[i] = np.nan
# ensures the group is rejected
c_data.is_accept_old[i] = False
def det_cluster_matches_new(c_data, is_feas, d_depth, r_spike, w_prog):
'''
:param c_data: the comparison data class object
:param is_feas: boolean array flagging the feasible fixed/free cluster pairings
:param d_depth: pair-wise absolute fixed/free channel depth differences
:param r_spike: pair-wise relative fixed/free spiking rate ratios
:param w_prog: the progress bar update signal
:return:
'''
# parameters
pW = 100.0 / 7.0
# memory allocation
signal_metrics = np.zeros((data_fix['nC'], data_free['nC'], 4))
isi_metrics = np.zeros((data_fix['nC'], data_free['nC'], 3))
isi_metrics_norm = np.zeros((data_fix['nC'], data_free['nC'], 3))
total_metrics = np.zeros((data_fix['nC'], data_free['nC'], 3))
# initialises the comparison data object
w_prog.emit('Calculating Signal DTW Indices', pW)
c_data = cfcn.calc_dtw_indices(c_data, data_fix, data_free, is_feas)
# calculates the signal feature metrics
w_prog.emit('Calculating Signal Feature Metrics', 2.0 * pW)
signal_feat = cfcn.calc_signal_feature_diff(data_fix, data_free, is_feas)
# calculates the signal direct matching metrics
w_prog.emit('Calculating Signal Comparison Metrics', 3.0 * pW)
cc_dtw, dd_dtw, dtw_scale = \
cfcn.calc_signal_corr(c_data.i_dtw, data_fix, data_free, is_feas)
signal_metrics[:, :, 0] = cc_dtw
signal_metrics[:, :, 1] = 1.0 - dd_dtw
signal_metrics[:, :, 2] = dtw_scale
signal_metrics[:, :, 3] = \
cfcn.calc_signal_hist_metrics(data_fix, data_free, is_feas, cfcn.calc_hist_intersect, max_norm=True)
# calculates the ISI histogram metrics
w_prog.emit('Calculating ISI Histogram Comparison Metrics', 4.0 * pW)
isi_metrics[:, :, 0], isi_metrics_norm[:, :, 0] = \
cfcn.calc_isi_corr(data_fix, data_free, is_feas)
isi_metrics[:, :, 1], isi_metrics_norm[:, :, 1] = \
cfcn.calc_isi_hist_metrics(data_fix, data_free, is_feas, cfcn.calc_hist_intersect, max_norm=True)
# isi_metrics[:, :, 2], isi_metrics_norm[:, :, 2] = \
# cfcn.calc_isi_hist_metrics(data_fix, data_free, is_feas, cfcn.calc_wasserstein, max_norm=False)
# isi_metrics[:, :, 3], isi_metrics_norm[:, :, 3] = \
# cfcn.calc_isi_hist_metrics(data_fix, data_free, is_feas, cfcn.calc_bhattacharyya, max_norm=True)
# sets the isi relative spiking rate metrics
isi_metrics[:, :, 2] = np.nan
for i_row in range(np.size(r_spike, axis=0)):
isi_metrics[i_row, is_feas[i_row, :], 2] = r_spike[i_row, is_feas[i_row, :]]
isi_metrics_norm[:, :, 2] = cfcn.norm_array_rows(isi_metrics[:, :, 2], max_norm=False)
# calculates the array euclidean distances (over all measures/clusters)
weight_array = [c_data.w_sig_feat, c_data.w_sig_comp, c_data.w_isi]
total_metrics[:, :, 0] = cfcn.calc_array_euclidean(signal_feat)
total_metrics[:, :, 1] = cfcn.calc_array_euclidean(signal_metrics)
total_metrics[:, :, 2] = cfcn.calc_array_euclidean(isi_metrics_norm)
total_metrics_mean = cfcn.calc_weighted_mean(total_metrics, W=weight_array)
# determines the unique overall cluster matches
w_prog.emit('Determining Overall Cluster Matches', 5.0 * pW)
c_data.i_match = det_overall_cluster_matches(is_feas, -total_metrics_mean)
# matches which are from different regions are to be removed
ii = np.where(c_data.i_match >= 0)[0]
same_region = data_fix['chRegion'][ii] == data_free['chRegion'][c_data.i_match[ii]]
c_data.i_match[ii[~same_region]] = -1
# calculates the correlation coefficients between the best matching signals
w_prog.emit('Setting Final Match Metrics', 6.0 * pW)
for i in range(data_fix['nC']):
# calculation of the z-scores
i_match = c_data.i_match[i]
if i_match >= 0:
# sets the signal feature metrics
c_data.match_intersect[:, i] = cfcn.calc_single_hist_metric(data_fix, data_free, i, i_match,
True, cfcn.calc_hist_intersect)
c_data.match_wasserstain[:, i] = cfcn.calc_single_hist_metric(data_fix, data_free, i,
i_match, True, cfcn.calc_wasserstein)
c_data.match_bhattacharyya[:, i] = cfcn.calc_single_hist_metric(data_fix, data_free, i,
i_match, True, cfcn.calc_bhattacharyya)
# sets the signal difference metrics
c_data.d_depth[i] = d_depth[i, i_match]
c_data.dtw_scale[i] = dtw_scale[i, i_match]
c_data.sig_corr[i] = cc_dtw[i, i_match]
c_data.sig_diff[i] = max(0.0, 1 - dd_dtw[i, i_match])
c_data.sig_intersect[i] = signal_metrics[i, i_match, 2]
# sets the isi metrics
c_data.isi_corr[i] = isi_metrics[i, i_match, 0]
c_data.isi_intersect[i] = isi_metrics[i, i_match, 1]
# sets the total match metrics
c_data.signal_feat[i, :] = signal_feat[i, i_match, :]
c_data.total_metrics[i, :] = total_metrics[i, i_match, :]
c_data.total_metrics_mean[i] = total_metrics_mean[i, i_match]
# sets the acceptance flag. for a cluster to be accepted, the following must be true:
# * the ISI correlation coefficient must be > isi_corr_min
# * the signal correlation coefficient must be > sig_corr_min
# * the inter-signal euclidean distance must be < sig_diff_max
# * all signal feature metric similarity scores must be > sig_feat_min
c_data.is_accept[i] = (c_data.isi_corr[i] > c_data.isi_corr_min) and \
(c_data.sig_corr[i] > c_data.sig_corr_min) and \
(c_data.sig_diff[i] > (1 - c_data.sig_diff_max)) and \
(np.all(c_data.signal_feat[i, :] > c_data.sig_feat_min))
else:
# sets NaN values for all the single value metrics
c_data.d_depth[i] = np.nan
c_data.dtw_scale[i] = np.nan
c_data.sig_corr[i] = np.nan
c_data.sig_diff[i] = np.nan
c_data.sig_intersect[i] = np.nan
c_data.isi_corr[i] = np.nan
c_data.isi_intersect[i] = np.nan
c_data.signal_feat[i, :] = np.nan
c_data.total_metrics[i, :] = np.nan
c_data.total_metrics_mean[i] = np.nan
# ensures the group is rejected
c_data.is_accept[i] = False
# calculates the fixed/free spiking rates (spike counts normalised by the experiment durations)
n_spike_fix = [len(x) / data_fix['tExp'] for x in data_fix['tSpike']]
n_spike_free = [len(x) / data_free['tExp'] for x in data_free['tSpike']]
# calculates the relative spiking rates (note - ratios are converted so that they are all > 1)
r_spike = np.divide(repmat(n_spike_fix, data_free['nC'], 1).T,
repmat(n_spike_free, data_fix['nC'], 1))
r_spike[r_spike < 1] = 1 / r_spike[r_spike < 1]
# calculates the pair-wise distances between the fixed/free probe depths
d_depth = np.abs(np.subtract(repmat(data_fix['chDepth'], data_free['nC'], 1).T,
repmat(data_free['chDepth'], data_fix['nC'], 1)))
# determines the feasible fixed/free cluster groupings such that:
# 1) the channel depth has to be <= d_max
# 2) the relative spiking rates between clusters is <= r_max
is_feas = np.logical_and(r_spike <= c_data.r_max, d_depth <= c_data.d_max)
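# e.g., fixed/free rates of 2Hz and 8Hz give a raw ratio of 0.25, which is
# folded to 4.0 above; with r_max = 4 and d_max = 200 such a pair is feasible
# only if the depth difference is also <= 200 (values are illustrative only)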
# determines the cluster matches from the old/new methods
det_cluster_matches_old(c_data, is_feas, d_depth)
det_cluster_matches_new(c_data, is_feas, d_depth, r_spike, w_prog)
def calc_ccgram_types(self, calc_para, data):
'''
:return:
'''
# determines the indices of the experiment to be analysed
if calc_para['calc_all_expt']:
# case is all experiments are to be analysed
i_expt = list(range(len(data)))
else:
# case is a single experiment is being analysed
i_expt = [cf.get_expt_index(calc_para['calc_exp_name'], data)]
# memory allocation
d_copy = copy.deepcopy
A, B, C = np.empty(len(i_expt), dtype=object), [[] for _ in range(5)], [[] for _ in range(4)]
c_type, t_dur, t_event, ci_lo, ci_hi, ccG_T = d_copy(A), d_copy(A), d_copy(A), d_copy(A), d_copy(A), d_copy(A)
# loops through each experiment calculating the cc-gram types
for i_ex in i_expt:
# sets the experiment ID info based on the number of experiments being analysed
if len(i_expt) == 1:
# only one experiment is being analysed
expt_id = None
else:
# multiple experiments are being analysed
expt_id = [(i_ex+1), len(i_expt)]
# retrieves the cluster information
t_dur[i_ex], t_event[i_ex] = d_copy(C), d_copy(C)
c_type[i_ex], ci_lo[i_ex], ci_hi[i_ex], ccG_T[i_ex] = d_copy(B), d_copy(B), d_copy(B), d_copy(B)
ccG, ccG_xi, t_spike = data[i_ex]['ccGram'], data[i_ex]['ccGramXi'], data[i_ex]['tSpike']
c_id = data[i_ex]['clustID']
# runs the cc-gram type calculation function
c_type0, t_dur[i_ex], t_event[i_ex], ci_hi0, ci_lo0, ccG_T0 = cfcn.calc_ccgram_types(
ccG, ccG_xi, t_spike, calc_para=calc_para, expt_id=expt_id, w_prog=self.work_progress, c_id=c_id)
# sets the final values into their respective groupings
for i in range(5):
# sets the final type values and lower/upper bound confidence interval signals
if len(c_type0[i]):
# stacks the final type values into a single array
c_type[i_ex][i] = np.vstack(c_type0[i])
# sorts the values by the reference cluster index
i_sort = np.lexsort((c_type[i_ex][i][:, 1], c_type[i_ex][i][:, 0]))
c_type[i_ex][i] = c_type[i_ex][i][i_sort, :]
# reorders the duration/timing of the events (if they exist)
if i < len(t_dur[i_ex]):
t_dur[i_ex][i] = np.array(t_dur[i_ex][i])[i_sort]
t_event[i_ex][i] = np.array(t_event[i_ex][i])[i_sort]
ci_lo[i_ex][i] = (np.vstack(ci_lo0[i]).T)[:, i_sort]
ci_hi[i_ex][i] = (np.vstack(ci_hi0[i]).T)[:, i_sort]
ccG_T[i_ex][i] = (np.vstack(ccG_T0[i]).T)[:, i_sort]
# returns the data as a dictionary
return {'c_type': c_type, 't_dur': t_dur, 't_event': t_event,
'ci_lo': ci_lo, 'ci_hi': ci_hi, 'ccG_T': ccG_T, 'calc_para': calc_para}
def calc_shuffled_cluster_dist(self, calc_para, data):
'''
Placeholder for the shuffled cluster distance calculations (not yet implemented).

:param calc_para: calculation parameter dictionary
:param data: loaded experiment data
:return:
'''
# FINISH ME!
pass
##########################################
#### CLUSTER MATCHING FUNCTIONS ####
##########################################
def calc_fix_free_correlation(self, data, calc_para, w_prog):
'''
:param data:
:param calc_para:
:param w_prog:
:return:
'''
# initialisations
i_bin = ['5', '10'].index(calc_para['vel_bin'])
tt_key = {'DARK1': 'Black', 'DARK': 'Black', 'LIGHT1': 'Uniform', 'LIGHT2': 'Uniform'}
f_data, r_data, ff_corr = data.externd.free_data, data.rotation, data.comp.ff_corr
n_bin = 2 * int(f_data.v_max / float(calc_para['vel_bin']))
# determines matching experiment index and fix-to-free cell index arrays
i_expt, f2f_map = cf.det_matching_fix_free_cells(data, apply_filter=False)
# determines the global indices for each file
nC = [len(x) for x in r_data.r_obj_kine.clust_ind[0]]
ind_g = [np.arange(i0, i0 + n) for i0, n in zip(np.cumsum([0] + nC)[:-1], nC)]
# memory allocation
n_file, t_type = len(i_expt), f_data.t_type
nan_bin = np.nan * np.ones(n_bin)
ff_corr.sf_fix = np.empty((n_file, len(t_type)), dtype=object)
ff_corr.sf_free = np.empty((n_file, len(t_type)), dtype=object)
ff_corr.sf_corr = np.empty((n_file, len(t_type)), dtype=object)
ff_corr.sf_corr_sh = np.empty((n_file, len(t_type)), dtype=object)
ff_corr.sf_corr_sig = np.empty((n_file, len(t_type)), dtype=object)
ff_corr.sf_grad = np.empty((n_file, len(t_type)), dtype=object)
ff_corr.clust_id = np.empty(n_file, dtype=object)
ff_corr.ind_g = np.empty(n_file, dtype=object)
# sets the velocity spiking rates (depending on calculation type)
if r_data.is_equal_time:
# case is resampled spiking times
vel_sf = dcopy(r_data.vel_sf_rs)
else:
# case is non-resampled spiking times
vel_sf = dcopy(r_data.vel_sf)
# loops through each external data file retrieving the spike frequency data and calculating correlations
n_cell_tot, i_cell_tot = np.sum(np.array(nC)[i_expt]), 0
for i_file in range(n_file):
# initialisations for the current external data file
ind_nw = ind_g[i_expt[i_file]]
i_f2f = f2f_map[i_file][:, 1]
s_freq = dcopy(f_data.s_freq[i_file][i_bin, :])
# retrieves the spiking frequency data between the matched fixed/free cells for the current experiment
for i_tt, tt in enumerate(t_type):
# sets the fixed/free spiking frequency values
ff_corr.sf_fix[i_file, i_tt] = np.nanmean(vel_sf[tt_key[tt]][:, :, ind_nw], axis=0).T
ff_corr.sf_free[i_file, i_tt] = np.vstack([s_freq[i_tt][ii] if ii >= 0 else nan_bin for ii in i_f2f])
# sets the cluster ID values
is_ok = i_f2f >= 0
i_expt_fix = cf.get_global_expt_index(data, data.comp.data[i_expt[i_file]])
fix_clust_id = np.array(data._cluster[i_expt_fix]['clustID'])[is_ok]
free_clust_id = np.array(data.externd.free_data.cell_id[i_file])[f2f_map[i_file][is_ok, 1]]
ff_corr.clust_id[i_file] = np.vstack((fix_clust_id, free_clust_id)).T
ff_corr.ind_g[i_file] = ind_nw
# calculates the shuffled spiking frequency correlations for the current experiment
cfcn.calc_shuffled_sf_corr(ff_corr, i_file, calc_para, [i_cell_tot, n_cell_tot], w_prog)
# increments the progressbar counter
i_cell_tot += len(ind_nw)
# sets the parameter values
ff_corr.vel_bin = int(calc_para['vel_bin'])
ff_corr.n_shuffle_corr = calc_para['n_shuffle']
ff_corr.split_vel = int(calc_para['split_vel'])
ff_corr.is_set = True
######################################
#### EYE TRACKING FUNCTIONS ####
######################################
def calc_eye_track_metrics(self, data, calc_para, w_prog):
'''
:param data:
:param calc_para:
:param w_prog:
:return:
'''
def calc_position_diff(p0, dt, calc_para):
'''
:param p0:
:param dt:
:param calc_para:
:return:
'''
# retrieves the position values and calculates the rolling difference
is_ok, n_frm = ~p0.isna(), p0.shape[0]
# calculates the mid-point derivative values
dp0 = p0.rolling(window=3, center=True).apply(lambda x: (x[2] - x[0]) / 2)
# calculates the end-point derivative values (for the first/last valid values)
i_ok = np.where(is_ok)[0]
i0, i1 = i_ok[0], i_ok[-1]
dp0.iloc[i0] = sum(np.multiply([-3, 4, -1], np.array(p0.iloc[i0:i0+3]).astype(float))) / 2
dp0.iloc[i1] = sum(np.multiply([1, -4, 3], np.array(p0.iloc[i1-2:i1+1]).astype(float))) / 2
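# NOTE: a minimal illustrative sketch of the derivative scheme above (standalone, assuming a
# simple float series; the rolling-apply pattern mirrors the line further up):
#
#   import numpy as np, pandas as pd
#   p = pd.Series([0., 1., 4., 9., 16.])
#   dp = p.rolling(window=3, center=True).apply(lambda x: (x[2] - x[0]) / 2)
#   dp.iloc[0] = (-3 * p.iloc[0] + 4 * p.iloc[1] - p.iloc[2]) / 2     # forward 3-point
#   dp.iloc[-1] = (p.iloc[-3] - 4 * p.iloc[-2] + 3 * p.iloc[-1]) / 2  # backward 3-point
#
# both one-sided formulas are second-order accurate, matching the central difference used
# for the interior points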
# calculates the rolling median
if calc_para['use_med_filt']:
dp0_med = dp0.rolling(window=3, center=True).median()
else:
dp0_med = dp0
# converts pd dataframes to float np-arrays (sets any NaN derivative values to zero)
p = np.array(p0).astype(float)
dp = np.array(dp0_med).astype(float) / (1000. * dt)
dp[~is_ok] = 0
# removes any outliers (regions where the derivative is greater than dp_max)
i_grp = cf.get_index_groups(np.abs(dp) > calc_para['dp_max'])
for ig in cf.expand_index_groups(i_grp, 2, n_frm):
dp[ig], p[ig] = 0, np.nan
# removes the baseline component (if required)
if calc_para['rmv_baseline']:
w_frm = 70 / n_frm
dp_bl = lowess(dp, np.arange(n_frm), w_frm, return_sorted=False)
dp -= dp_bl
# returns the derivative array
return dp - np.nanmean(dp), p
def det_movement_events(p_pos, dp_pos, calc_para, n_pre, n_post, t_frm):
'''
:param p_pos:
:param dp_pos:
:param calc_para:
:param n_pre:
:param n_post:
:param t_frm:
:return:
'''
def get_event_sig_seg(p_pos, i_grp0, n_pre, n_post, n_frm):
'''
:param p_pos:
:param i_grp0:
:param n_pre:
:param n_post:
:param n_frm:
:return:
'''
def get_sig_seg(y_sig, i_grp0, n_pp, n_frm=None):
'''
:param y_sig:
:param i_grp0:
:param n_pp:
:param n_frm:
:return:
'''
if n_frm is None:
# case is the signal values preceding the onset point
return list(y_sig[max(0, (i_grp0 - n_pp)):(i_grp0 + 1)])
else:
# case is the signal values following the onset point
return list(y_sig[(i_grp0 + 1):min(n_frm - 1, i_grp0 + (1 + n_pp))])
return np.array(get_sig_seg(p_pos, i_grp0, n_pre) + get_sig_seg(p_pos, i_grp0, n_post, n_frm))
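# NOTE: a minimal illustrative sketch of the pre/post window extraction performed by
# get_sig_seg above (standalone, numpy only; y and i0 are hypothetical):
#
#   import numpy as np
#   y, i0, n_pre, n_post = np.arange(20.), 10, 3, 4
#   seg = np.concatenate((y[max(0, i0 - n_pre):(i0 + 1)],                 # onset + preceding
#                         y[(i0 + 1):min(len(y) - 1, i0 + 1 + n_post)]))  # following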
# initialisations
n_frm, i_ofs = len(t_frm), 1
t_evnt, y_evnt = [], []
n_sd, dp_max, n_event_win = calc_para['n_sd'], calc_para['dp_max'], n_pre + n_post + 1
# thresholds the position derivative values
b_arr, sgn_arr = np.abs(dp_pos) >= np.nanstd(dp_pos) * n_sd, np.sign(dp_pos)
if np.any(b_arr):
# if there are any derivative values greater than threshold, then determine the index groups of the
# contiguous points that are greater than threshold. from this, determine the max absolute amplitudes
# within these groups and the start indices of each group
i_grp = cf.get_index_groups(b_arr)
grp_mx, i_grp0 = [np.max(np.abs(dp_pos[x])) for x in i_grp], np.array([(x[0] - i_ofs) for x in i_grp])
# determines the groups whose full event windows (n_pre points before to n_post points after
# the onset) fall within the signal bounds
di_grp0 = np.diff(i_grp0)
is_ok = np.array([(x >= n_pre) and (x <= (n_frm - n_post)) for x in i_grp0])
for ig in np.where(di_grp0 < n_event_win)[0]:
if sgn_arr[i_grp0[ig]] * sgn_arr[i_grp0[ig + 1]] < 0:
# if the thresholded groups have differing derivative signs, then ignore both groups
is_ok[ig:ig+2] = False
else:
# otherwise, remove the thresholded group with the lower amplitude peak
is_ok[ig + (grp_mx[ig] > grp_mx[ig + 1])] = False
# memory allocation
n_evnt = len(is_ok)
t_evnt0, y_evnt0 = np.zeros(n_evnt), np.zeros((n_evnt, n_event_win))
# removes the ignored contiguous groups
for i in range(n_evnt):
if is_ok[i]:
y_evnt_nw = get_event_sig_seg(p_pos, i_grp0[i], n_pre, n_post, n_frm)
if not np.any(np.isnan(y_evnt_nw)):
y_evnt0[i, :], t_evnt0[i] = y_evnt_nw, t_frm[i_grp0[i]]
else:
is_ok[i] = False
# removes the ignored/invalid events
t_evnt0, y_evnt0 = t_evnt0[is_ok], y_evnt0[is_ok]
# appends the time stamps of the events for both eye movement types
i_sgn = np.array([int(sgn_arr[x + i_ofs] > 0) for x in i_grp0[is_ok]])
t_evnt.append([t_evnt0[i_sgn == i] for i in range(2)])
# sets the sub-signal/mean sub-signal values for both eye movement types
y_evnt_tmp = [y_evnt0[i_sgn == i, :] for i in range(2)]
y_evnt.append([np.subtract(x, x[:, n_pre][:, None]) if len(x) else [] for x in y_evnt_tmp])
else:
# if no event, then set empty time/signal events for both types
t_evnt.append([[], []])
y_evnt.append([[], []])
# returns the event time/signal arrays
return t_evnt, y_evnt
# retrieves the eye-tracking class object
et_class = data.externd.eye_track
n_file = len(et_class.et_data)
# sets the pre/post event duration
n_pre, n_post = calc_para['n_pre'], calc_para['n_post']
# memory allocation
dt = 1 / et_class.fps
A = np.empty(n_file, dtype=object)
et_class.t_evnt, et_class.y_evnt = dcopy(A), dcopy(A)
et_class.t_type = list(np.unique(cf.flat_list([x.t_type for x in et_class.et_data])))
# loops through each of the files calculating the eye-movement events
for i_file, et_d in enumerate(et_class.et_data):
# updates the progress bar string
w_str = 'Detecting Movement Events (Expt {0} of {1})'.format(i_file + 1, n_file)
# memory allocation
n_tt = len(et_d.t_type)
B = np.empty(len(et_class.t_type), dtype=object)
et_class.t_evnt[i_file], et_class.y_evnt[i_file] = dcopy(B), dcopy(B)
# loops through each of the trial types calculating the eye-movement events
for i_tt in range(n_tt):
# updates the progress-bar
w_prog.emit(w_str, 100. * ((i_file * n_tt + i_tt) / (n_tt * n_file)))
# retrieves the position values
p0 = dcopy(et_d.p_pos[i_tt])
if calc_para['use_med_filt']:
# calculates the rolling median (if required)
p0 = p0.rolling(window=3, center=True).median()
# calculates the position difference values
dp, p = calc_position_diff(p0, dt, calc_para)
# calculates the events/signal sub-segments for all events
j_tt = et_class.t_type.index(et_class.et_data[i_file].t_type[i_tt])
t_frm = np.arange(len(p)) / et_class.fps
tt, yy = det_movement_events(p, dp, calc_para, n_pre, n_post, t_frm)
et_class.t_evnt[i_file][j_tt], et_class.y_evnt[i_file][j_tt] = tt[0], yy[0]
#######################################
#### HOUSE-KEEPING EXERCISES ####
#######################################
# updates the calculation parameters
et_class.use_med_filt = calc_para['use_med_filt']
et_class.rmv_baseline = calc_para['rmv_baseline']
et_class.dp_max = calc_para['dp_max']
et_class.n_sd = calc_para['n_sd']
et_class.n_pre = calc_para['n_pre']
et_class.n_post = calc_para['n_post']
et_class.is_set = True
def calc_eye_track_corr(self, data, calc_para, w_prog):
'''
:param data:
:param calc_para:
:param w_prog:
:return:
'''
def get_trial_group_start_time(r_info, tt_c0):
'''
:param r_info:
:param tt_c0:
:return:
'''
def get_expt_time_span(ind0, i_type):
'''
:param ind0:
:param i_type:
:return:
'''
if i_type == 0:
# returns the first trial index
return ind0[0]
else:
# determines the 2nd order difference in the trial start times
dind0 = np.zeros(len(ind0), dtype=int)
dind0[2:] = np.diff(ind0, 2)
# determines the first point where the trial start times jump discontinuously
i_diff = np.where(np.abs(dind0) > 1e10)[0]
return ind0[i_diff[0]]
# sets the trial type (removes any extra indices at the end of the trial type string)
i_type = int(tt_c0[-1] == '2')
tt = tt_c0 if (i_type == 0) else tt_c0[:-1]
# retrieves the start time of the trial grouping
return get_expt_time_span(r_info['wfm_para'][tt]['ind0'], i_type)
def get_grouping_spike_times(t_sp, t_exp, t0):
'''
:param t_sp:
:param t_exp:
:param t0:
:return:
'''
# memory allocation
n_cell = len(t_sp)
t_sp_h = np.zeros((n_cell, len(t_exp)))
# calculates the time spiking histograms (for each cell) downsampled to that of the eye-tracking analysis
for i_cell in range(n_cell):
# retrieves the spike times for the current cell
t_sp_tmp = t_sp[i_cell] / 1000
t_sp_grp = t_sp_tmp[np.logical_and(t_sp_tmp >= t0, t_sp_tmp <= t0 + t_exp[-1])] - t0
# calculates the spike time histogram (time bins are set for the eye-tracking analysis)
t_sp_h[i_cell, 1:] = np.histogram(t_sp_grp, bins=t_exp)[0]
# returns the histogram arrays
return t_sp_h
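# NOTE: a minimal illustrative sketch of the histogram binning above (standalone; spike
# times and frame rate are hypothetical):
#
#   import numpy as np
#   t_exp = np.arange(0., 10., 1. / 30.)               # 30 fps frame times
#   t_sp = np.sort(np.random.uniform(0., 10., 500))    # example spike times (s)
#   h = np.histogram(t_sp, bins=t_exp)[0]              # len(t_exp) - 1 counts
#
# np.histogram returns one count per pair of bin edges, which is why the counts are written
# into t_sp_h[i_cell, 1:] above (the first column stays zero)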
def get_event_spike_times(t_sp_h, t_evnt, dt_et, calc_para):
'''
:param t_sp_h:
:param t_evnt:
:param dt_et:
:param calc_para:
:return:
'''
# memory allocation
n_cell, n_frm = np.shape(t_sp_h)
sp_evnt = np.empty(len(t_evnt), dtype=object)
# sets the pre/post event duration
n_pre, n_post = calc_para['n_pre'], calc_para['n_post']
n_pts = n_pre + n_post + 1
# retrieves the spike time events for each eye-movement type
for i in range(len(t_evnt)):
# sets the indices of the events (ensures all frames are within that of the eye-tracking analysis)
i_evnt = np.round(t_evnt[i] / dt_et).astype(int)
i_evnt = i_evnt[np.logical_and((i_evnt - n_pre) >= 0, (i_evnt + n_post) < n_frm)]
# memory allocation for eye-movement type
n_evnt = len(i_evnt)
sp_evnt[i] = np.zeros((n_evnt, n_pts, n_cell))
# retrieves the spike time histogram values over each cell/eye-movement event
for j in range(n_evnt):
i_rng = np.arange(i_evnt[j] - n_pre, i_evnt[j] + n_post + 1)
sp_evnt[i][j, :, :] = t_sp_h[:, i_rng].T
# returns the array
return sp_evnt
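# NOTE: a minimal illustrative sketch of the event-triggered windowing above (standalone;
# array sizes are hypothetical):
#
#   import numpy as np
#   t_sp_h = np.random.poisson(1., (5, 1000))            # (n_cell, n_frm) spike counts
#   i_evnt, n_pre, n_post = np.array([100, 400]), 10, 20
#   seg = np.stack([t_sp_h[:, (i - n_pre):(i + n_post + 1)].T for i in i_evnt])
#   # -> shape (n_evnt, n_pre + n_post + 1, n_cell), matching sp_evnt[i] above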
# initialisations and memory allocation
et_class = data.externd.eye_track
exp_file = [cf.extract_file_name(x['expFile']) for x in data.cluster]
n_exp, dt_et = et_class.n_file, 1. / et_class.fps
# memory allocation
A = np.empty(n_exp, dtype=object)
t_sp_h, sp_evnt, y_corr, p_corr = dcopy(A), dcopy(A), dcopy(A), dcopy(A)
# loops through each experiment calculating the spiking rate/eye movement correlations
for i_exp, et_d in enumerate(et_class.et_data):
# initialisations
n_tt, pw0 = len(et_d.t_type), i_exp / n_exp
# memory allocation
B = np.empty(n_tt, dtype=object)
t_sp_h[i_exp], sp_evnt[i_exp], y_corr[i_exp], p_corr[i_exp] = dcopy(B), dcopy(B), dcopy(B), dcopy(B)
# retrieves the rotation info of the corresponding expt
c = data._cluster[cf.det_likely_filename_match(exp_file, et_class.exp_name[i_exp])]
r_info, dt_c, t_sp_c = c['rotInfo'], 1. / c['sFreq'], c['tSpike']
# loops through each trial type calculating the correlations
for i_tt, tt in enumerate(et_d.t_type):
# updates the progressbar
tt_c = tt.capitalize()
w_str = 'Calculating Correlations (Expt {0}/{1} - {2})'.format(i_exp + 1, n_exp, tt_c)
w_prog.emit(w_str, 100. * (pw0 + (i_tt / n_tt) / n_exp))
# sets the time vector over the eye-tracking analysis
j_tt = et_class.t_type.index(et_class.et_data[i_exp].t_type[i_tt])
t_exp = np.arange(len(et_d.p_pos[j_tt])) * dt_et
# retrieves the spike times over the duration of the eye tracking analysis
t0 = get_trial_group_start_time(r_info, tt_c) * dt_c
t_sp_h[i_exp][j_tt] = get_grouping_spike_times(t_sp_c, t_exp, t0)
# retrieves the spike times traces surrounding the times of the eye movement
t_evnt = et_class.t_evnt[i_exp][j_tt]
sp_evnt[i_exp][j_tt] = get_event_spike_times(t_sp_h[i_exp][j_tt], t_evnt, dt_et, calc_para)
# calculates the correlations between each cell and the eye movement events
y_evnt = et_class.y_evnt[i_exp][j_tt]
y_corr[i_exp][j_tt], p_corr[i_exp][j_tt] = cfcn.calc_event_correlation(y_evnt, sp_evnt[i_exp][j_tt])
#######################################
#### HOUSE-KEEPING EXERCISES ####
#######################################
# sets the arrays into the eye-tracking class object
data.externd.eye_track.t_sp_h = t_sp_h
data.externd.eye_track.sp_evnt = sp_evnt
data.externd.eye_track.y_corr = y_corr
data.externd.eye_track.p_corr = p_corr
# final update of the progressbar
w_prog.emit('Correlation Calculations Complete!', 100.)
######################################
#### AHV ANALYSIS FUNCTIONS ####
######################################
def calc_corr_fit_para(self, data, plot_para, calc_para, w_prog):
'''
:param data:
:param plot_para:
:param calc_para:
:param w_prog:
:return:
'''
def calc_sf_lin_para(xi, sf, peak_hz, err_type):
'''
:param xi:
:param sf:
:param peak_hz:
:param err_type:
:return:
'''
# memory allocation
n_cell = np.shape(sf)[0]
sf_slope, sf_int = np.zeros(n_cell), np.zeros(n_cell)
sf_err = np.zeros(n_cell)
# calculates the linear parameters for each cell
for i_cell in range(n_cell):
# slope/intercept calculation
sf_calc = sf[i_cell]
l_fit = linregress(xi, sf_calc / peak_hz[i_cell])
sf_slope[i_cell], sf_int[i_cell] = l_fit.slope, l_fit.intercept
# error calculation
dsf_calc = (sf_calc - sf_calc[0])
dsf_max = np.max(np.abs(dsf_calc))
if (dsf_max > 0) and (err_type is not None):
if err_type == 'Covariance':
_, pcov = curve_fit(lin_func, xi, dsf_calc / dsf_max)
sf_err[i_cell] = np.sqrt(pcov[0][0])
elif err_type == 'Sum-of-Squares':
p_fit_err = np.polyfit(xi, dsf_calc / dsf_max, 1, full=True)
sf_err[i_cell] = p_fit_err[1][0]
elif err_type == 'Standard Error':
l_fit_err = linregress(xi, dsf_calc / dsf_max)
sf_err[i_cell] = l_fit_err.stderr
# returns the array
return sf_slope, sf_int, sf_err
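# NOTE: a minimal illustrative sketch comparing the three fit-error metrics above on the
# same data (standalone; lin_func is re-defined locally here as a zero-intercept line):
#
#   import numpy as np
#   from scipy.stats import linregress
#   from scipy.optimize import curve_fit
#   xi = np.arange(10.)
#   y = 2. * xi + np.random.randn(10)
#   lin_func = lambda x, a: a * x
#   _, pcov = curve_fit(lin_func, xi, y)           # 'Covariance' -> np.sqrt(pcov[0][0])
#   ssq = np.polyfit(xi, y, 1, full=True)[1][0]    # 'Sum-of-Squares' -> residual sum
#   se = linregress(xi, y).stderr                  # 'Standard Error' -> slope std error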
# appends the fields to the rotation class object
r_data = data.rotation
if not hasattr(r_data, 'sf_fix_slope'):
r_data.sf_fix_slope = None
r_data.sf_fix_int = None
r_data.sf_fix_err = None
r_data.peak_hz_fix = None
# applies the rotation filter to the dataset
r_obj = RotationFilteredData(data, plot_para['rot_filt'], None, None, True, 'Whole Experiment', False)
n_filt = r_obj.n_filt
# determines the common cell indices for each filter types
t_type_full = [x['t_type'][0] for x in r_obj.rot_filt_tot]
i_cell_b, _ = cfcn.get_common_filtered_cell_indices(data, r_obj, t_type_full, True)
# retrieves the spiking frequencies
r_data = data.rotation
sf = dcopy(r_data.vel_sf_mean)
err_type = None if 'err_type' not in calc_para else calc_para['err_type']
norm_sf = False if 'norm_sf' not in calc_para else calc_para['norm_sf']
# sets up the velocity bin values
v_max, v_bin = 80, r_data.vel_bin_corr
xi_bin = np.arange(-v_max + v_bin / 2, v_max, v_bin)
is_pos = xi_bin > 0
n_bin = sum(is_pos)
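# NOTE: a minimal illustrative sketch of the velocity bin centre construction above:
#
#   import numpy as np
#   v_max, v_bin = 80, 5
#   xi_bin = np.arange(-v_max + v_bin / 2, v_max, v_bin)
#   # -> [-77.5, -72.5, ..., 77.5], i.e. the centres of 2 * v_max / v_bin = 32 bins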
# memory allocation
A = np.empty((2, n_filt), dtype=object)
sf_slope, sf_int, sf_err, peak_hz = dcopy(A), dcopy(A), dcopy(A), np.empty(n_filt, dtype=object)
if norm_sf:
# for each filter type, calculate the linear fit parameters
dsf_filt = np.empty(n_filt, dtype=object)
peak_hz_filt = np.empty(n_filt, dtype=object)
for i_filt, tt in enumerate(t_type_full):
# calculates the slope/intercept values
sf_filt = sf[tt][i_cell_b[i_filt], :]
# combines the negative/positive direction spiking frequencies (relative to the first bin)
sf_comb = [np.vstack(sf_filt[:, 0])[:, ::-1], np.vstack(sf_filt[:, 1])]
dsf_filt[i_filt] = [sf - repmat(sf[:, 0], n_bin, 1).T for sf in sf_comb]
# determines the peak frequency
peak_hz_filt[i_filt] = np.max(np.abs(np.hstack((dsf_filt[i_filt][0], dsf_filt[i_filt][1]))), axis=1)
# determines the peak spiking frequency across all conditions
peak_hz = np.max(np.abs(np.vstack(peak_hz_filt)), axis=0)
# for each filter type, calculate the linear fit parameters
for i_filt, tt in enumerate(t_type_full):
# updates the progress bar
w_str = 'Linear Fit Calculations ({0})'.format(tt)
w_prog.emit(w_str, 100. * i_filt / len(t_type_full))
if norm_sf:
# sets the positive/negative spiking frequencies
sf_neg, sf_pos = dsf_filt[i_filt][0], dsf_filt[i_filt][1]
else:
# calculates the slope/intercept values
sf_filt = sf[tt][i_cell_b[i_filt], :]
# sets the positive/negative spiking frequencies
sf_neg, sf_pos = np.vstack(sf_filt[:, 0])[:, ::-1], np.vstack(sf_filt[:, 1])
peak_hz = np.ones(np.shape(sf_neg)[0])
# calculates the spiking frequency slope, intercept and errors
sf_slope[0, i_filt], sf_int[0, i_filt], sf_err[0, i_filt] = \
calc_sf_lin_para(xi_bin[is_pos], sf_neg, peak_hz, err_type)
sf_slope[1, i_filt], sf_int[1, i_filt], sf_err[1, i_filt] = \
calc_sf_lin_para(xi_bin[is_pos], sf_pos, peak_hz, err_type)
#######################################
#### HOUSE-KEEPING EXERCISES ####
#######################################
# sets the class object fields
r_data.sf_fix_slope = sf_slope
r_data.sf_fix_int = sf_int
r_data.sf_fix_err = sf_err
r_data.r_obj_sf = r_obj
r_data.peak_hz_fix = peak_hz
#######################################
#### FREELY MOVING FUNCTIONS ####
#######################################
def calc_cell_fit_residual(self, data, calc_para, w_prog):
'''
:param data:
:param calc_para:
:param w_prog:
:return:
'''
def calc_cell_res_gain(xi, sf_split):
'''
:param xi:
:param sf_split:
:return:
'''
def calc_sf_res(xi, sf):
'''
:param xi:
:param sf:
:return:
'''
# fits a linear equation to the spiking frequencies
l_fit = LinearRegression(fit_intercept=False).fit(xi, sf)
# p_fit = np.polyfit(xi, sf, 1)
# calculates the absolute residual values (normalising by the maximum spiking rate)
return np.abs(l_fit.predict(xi) - sf)
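# NOTE: a minimal illustrative sketch of the zero-intercept fit/residual step above
# (standalone; xi must be 2D for scikit-learn's LinearRegression):
#
#   import numpy as np
#   from sklearn.linear_model import LinearRegression
#   xi = np.arange(2.5, 80., 5.).reshape(-1, 1)
#   sf = 0.5 * xi.ravel() + np.random.randn(len(xi))
#   l_fit = LinearRegression(fit_intercept=False).fit(xi, sf)
#   res = np.abs(l_fit.predict(xi) - sf)           # absolute residuals from the fit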
# memory allocation
n_type = np.shape(sf_split)[1]
sf_gain, sf_res = np.empty(n_type, dtype=object), np.empty(n_type, dtype=object)
# calculates the overall spiking frequency maximum
# sf_max = np.max([[np.max(y) for y in x] for x in sf_split])
# if sf_max == 0:
sf_max = np.max([[np.max(np.abs(y)) for y in x] for x in sf_split])
# calculates/sets the residual/gain values for each direction/condition type
for i_type in range(n_type):
sf_gain[i_type] = np.array(cf.flat_list(sf_split[:, i_type]))
sf_res[i_type] = np.array([calc_sf_res(xi, sf / np.max(np.abs(sf))) for sf in sf_split[:, i_type]]).flatten()
# calculates the normalised absolute residuals from the linear fits to the spiking frequencies
return sf_gain, sf_res, sf_max
# initialisations
f_data = data.externd.free_data
# ensures the freely moving class calculation fields have been set (initialises them if they have not)
if not hasattr(f_data, 'sf_gain'):
setattr(f_data, 'sf_gain', None)
setattr(f_data, 'sf_res', None)
setattr(f_data, 'sf_vbin', None)
setattr(f_data, 'sf_tt', None)
setattr(f_data, 'sf_max', None)
# initialisations
t_type = ['DARK', calc_para['lcond_type']]
v_bin, v_max = int(calc_para['vel_bin']), 80.
i_bin = [5, 10].index(v_bin)
i_tt = [list(f_data.t_type).index(tt) for tt in t_type]
# sets up the velocity bin array
xi = np.arange(-v_max + v_bin / 2, v_max, v_bin)
# memory allocation
n_type = len(t_type)
A = np.empty(f_data.n_file, dtype=object)
sf_res, sf_gain, sf_max = dcopy(A), dcopy(A), dcopy(A)
##########################################
#### GAIN/RESIDUAL CALCULATIONS ####
##########################################
# memory allocation and other initialisations
is_pos = xi > 0
n_bin, n_dir = int(len(xi) / 2), 2
# retrieves the spiking frequencies for the velocity bin size
sf_bin = [sf[i_bin] for sf in f_data.s_freq]
# calculates the gain/residuals for each file
for i_file in range(f_data.n_file):
# updates the waitbar progress
w_str = 'Gain/Residual Calculations ({0} of {1})'.format(i_file + 1, f_data.n_file)
w_prog.emit(w_str, 100 * (i_file / f_data.n_file))
# memory allocation
n_cell = np.shape(sf_bin[i_file][0])[0]
B = np.empty((n_cell, n_type), dtype=object)
sf_res[i_file], sf_gain[i_file], sf_max[i_file] = dcopy(B), dcopy(B), np.zeros(n_cell)
# calculates the gain/residuals for each cell/condition type
for i_cell in range(n_cell):
# memory allocation
sf_split = np.empty((n_dir, n_type), dtype=object)
# splits the spiking frequency into positive/negative velocities for each condition type
for i_type in range(n_type):
# retrieves the spiking frequency for the current cell/condition type and separates
sf_cell = sf_bin[i_file][i_tt[i_type]][i_cell]
sf_split0 = [sf_cell[~is_pos][::-1], sf_cell[is_pos]]
# subtracts the first time bin value from each direction (baseline removal)
for i_dir in range(n_dir):
sf_split[i_dir, i_type] = sf_split0[i_dir] - sf_split0[i_dir][0]
# calculates the gain/residual for each condition type
sf_gain[i_file][i_cell, :], sf_res[i_file][i_cell, :], sf_max[i_file][i_cell] = \
calc_cell_res_gain(xi[is_pos].reshape(-1, 1), sf_split)
#######################################
#### HOUSE-KEEPING EXERCISES ####
#######################################
# sets the class object fields
f_data.sf_gain = sf_gain
f_data.sf_res = sf_res
f_data.sf_vbin = int(calc_para['vel_bin'])
f_data.sf_tt = t_type
f_data.sf_max = sf_max
#########################################
#### ROTATION LDA CALCULATIONS ####
#########################################
def run_temporal_lda(self, data, calc_para, r_filt, i_expt, i_cell, n_trial_max):
'''
:param data:
:param calc_para:
:param r_filt:
:param i_expt:
:param i_cell:
:param n_trial_max:
:return:
'''
# initialisations and memory allocation
d_data, w_prog = data.discrim.temp, self.work_progress
d_data.lda, d_data.y_acc = np.empty(2, dtype=object), np.empty(2, dtype=object)
# retrieves the rotation phase duration
r_obj = RotationFilteredData(data, r_filt, None, None, True, 'Whole Experiment', False)
t_phase = r_obj.t_phase[0][0]
################################################
#### DIFFERING PHASE LDA CALCULATIONS ####
################################################
# creates a copy of the calculation parameters for the differing phase duration LDA calculations
calc_para_phs = dcopy(calc_para)
calc_para_phs['t_ofs_rot'] = 0
# memory allocation
dt_phs = np.arange(calc_para['dt_phase'], t_phase, calc_para['dt_phase'])
d_data.lda[0], d_data.y_acc[0] = np.empty(len(dt_phs), dtype=object), np.empty(len(dt_phs), dtype=object)
# loops through each of the phase discretisations calculating the LDA calculations
n_phs = len(dt_phs)
for i_phs in range(n_phs):
# updates the progress bar
w_str = 'Duration LDA Calculations (Group {0} of {1})'.format(i_phs + 1, n_phs)
w_prog.emit(w_str, 50. * ((i_phs + 1)/ n_phs))
# updates the phase duration parameter
calc_para_phs['t_phase_rot'] = dt_phs[i_phs]
# runs the rotation analysis for the current configuration
result = cfcn.run_rot_lda(data, calc_para_phs, r_filt, i_expt, i_cell, n_trial_max)
if isinstance(result, bool):
# if there was an error, then return a false flag value
return False
else:
# otherwise, store the lda/accuracy values
d_data.lda[0][i_phs], d_data.y_acc[0][i_phs] = result[0], result[1]
#################################################
#### DIFFERING OFFSET LDA CALCULATIONS ####
#################################################
# creates a copy of the calculation parameters for the differing offset LDA calculations
calc_para_ofs = dcopy(calc_para)
calc_para_ofs['t_phase_rot'] = calc_para['t_phase_const']
# sets the differing phase/offset value arrays
dt_ofs = np.arange(0., t_phase - calc_para['t_phase_const'], calc_para['t_phase_const'])
d_data.lda[1], d_data.y_acc[1] = np.empty(len(dt_ofs), dtype=object), np.empty(len(dt_ofs), dtype=object)
# loops through each of the phase discretisations calculating the LDA calculations
n_ofs = len(dt_ofs)
for i_ofs in range(n_ofs):
# updates the progress bar
w_str = 'Offset LDA Calculations (Group {0} of {1})'.format(i_ofs + 1, n_ofs)
w_prog.emit(w_str, 50. * (1 + ((i_ofs + 1) / n_ofs)))
# updates the phase duration parameter
calc_para_ofs['t_ofs_rot'] = dt_ofs[i_ofs]
# runs the rotation analysis for the current configuration
result = cfcn.run_rot_lda(data, calc_para_ofs, r_filt, i_expt, i_cell, n_trial_max)
if isinstance(result, bool):
# if there was an error, then return a false flag value
return False
else:
# otherwise, store the lda/accuracy values
d_data.lda[1][i_ofs], d_data.y_acc[1][i_ofs] = result[0], result[1]
#######################################
#### HOUSE KEEPING EXERCISES ####
#######################################
# retrieves the LDA solver parameter fields
lda_para = calc_para['lda_para']
# sets the solver parameters
d_data.lda = 1
d_data.exp_name = result[2]
d_data.i_expt = i_expt
d_data.i_cell = i_cell
cfcn.set_lda_para(d_data, lda_para, r_filt, n_trial_max)
d_data.lda_trial_type = cfcn.get_glob_para('lda_trial_type')
# sets the other calculation parameters
d_data.dt_phs = calc_para['dt_phase']
d_data.dt_ofs = calc_para['dt_ofs']
d_data.phs_const = calc_para['t_phase_const']
# sets the other variables/parameters of interest
d_data.xi_phs = dt_phs
d_data.xi_ofs = dt_ofs
# returns a true value indicating the calculations were successful
return True
def run_shuffled_lda(self, data, calc_para, r_filt, i_expt, i_cell, n_trial_max):
'''
:param data:
:param calc_para:
:param r_filt:
:param i_expt:
:param i_cell:
:param n_trial_max:
:return:
'''
# initialisations and memory allocation
d_data, w_prog = data.discrim.shuffle, self.work_progress
if d_data.lda is not None:
return True
# retrieves the phase duration/offset values
t_ofs, t_phase = cfcn.get_rot_phase_offsets(calc_para)
if t_ofs is None:
t_ofs, t_phase = 0, 3.5346
###############################################
#### SHUFFLED TRIAL LDA CALCULATIONS ####
###############################################
# creates a reduce data object and creates the rotation filter object
n_ex, n_sh, n_cond = len(i_expt), calc_para['n_shuffle'], len(r_filt['t_type'])
d_data.y_acc = np.empty((n_ex, n_cond + 1, n_sh), dtype=object)
n_sp = np.empty((n_ex, n_sh), dtype=object)
# runs the LDA for each of the shuffles
for i_sh in range(n_sh):
# updates the progressbar
w_str = 'Shuffled Trial LDA (Shuffle #{0} of {1})'.format(i_sh + 1, n_sh)
w_prog.emit(w_str, 100. * (i_sh / n_sh))
# runs the rotation analysis for the current configuration
result = cfcn.run_rot_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max, is_shuffle=True)
if isinstance(result, bool):
# if there was an error, then return a false flag value
return False
else:
# otherwise, store the lda/accuracy values
d_data.y_acc[:, :, i_sh], n_sp[:, i_sh] = result[1], result[3]
if i_sh == 0:
# sets the experiment names (for the first shuffle only)
d_data.exp_name = result[2]
#######################################
#### HOUSE KEEPING EXERCISES ####
#######################################
# retrieves the LDA solver parameter fields
lda_para = calc_para['lda_para']
# sets the solver parameters
d_data.lda = 1
d_data.i_expt = i_expt
d_data.i_cell = i_cell
cfcn.set_lda_para(d_data, lda_para, r_filt, n_trial_max)
d_data.lda_trial_type = cfcn.get_glob_para('lda_trial_type')
# sets the phase offset/duration parameters
d_data.tofs = t_ofs
d_data.tphase = t_phase
d_data.usefull = calc_para['use_full_rot']
# sets the other parameters
d_data.nshuffle = n_sh
# d_data.bsz = calc_para['b_sz']
# calculates the correlations
n_sp_tot = [np.dstack(x) for x in n_sp]
cfcn.calc_noise_correl(d_data, n_sp_tot)
# returns a true value indicating the calculations were successful
return True
def run_individual_lda(self, data, calc_para, r_filt, i_expt, i_cell, n_trial_max):
'''
:param data:
:param calc_para:
:param r_filt:
:param i_expt:
:param i_cell:
:param n_trial_max:
:return:
'''
# initialisations and memory allocation
d_data, w_prog = data.discrim.indiv, self.work_progress
# removes normalisation for the individual cell LDA calculations
_calc_para = dcopy(calc_para)
# _calc_para['lda_para']['is_norm'] = False
################################################
#### INDIVIDUAL CELL LDA CALCULATIONS ####
################################################
# creates a reduce data object and creates the rotation filter object
n_ex = len(i_expt)
A = np.empty(n_ex, dtype=object)
d_data.y_acc, d_data.exp_name = dcopy(A), dcopy(A)
n_cell = [len(i_c) for i_c in i_cell]
# runs the individual cell LDA calculations for each experiment
for i_ex in range(n_ex):
# creates a copy of the accepted cell array for the analysis
_i_cell = np.zeros(n_cell[i_ex], dtype=bool)
_n_cell = np.sum(i_cell[i_ex])
d_data.y_acc[i_ex] = np.zeros((_n_cell, 1 + len(calc_para['lda_para']['comp_cond'])))
# runs the LDA analysis for each of the cells
for i, i_c in enumerate(np.where(i_cell[i_ex])[0]):
# updates the progressbar
w_str = 'Single Cell LDA (Cell {0}/{1}, Expt {2}/{3})'.format(i + 1, _n_cell, i_ex + 1, n_ex)
w_prog.emit(w_str, 100. * (i_ex + i / _n_cell) / n_ex)
# sets the cell for analysis and runs the LDA
_i_cell[i_c] = True
results = cfcn.run_rot_lda(data, _calc_para, r_filt, [i_expt[i_ex]], [_i_cell], n_trial_max)
if isinstance(results, bool):
# if there was an error, then return a false flag value
return False
else:
# otherwise, reset the cell boolean flag
_i_cell[i_c] = False
# stores the results from the single cell LDA
d_data.y_acc[i_ex][i, :] = results[1]
if i == 0:
# if the first iteration, then store the experiment name
d_data.exp_name[i_ex] = results[2]
#######################################
#### HOUSE KEEPING EXERCISES ####
#######################################
# retrieves the LDA solver parameter fields
lda_para = calc_para['lda_para']
t_ofs, t_phase = cfcn.get_rot_phase_offsets(calc_para)
# sets the solver parameters
d_data.lda = 1
d_data.i_expt = i_expt
d_data.i_cell = i_cell
cfcn.set_lda_para(d_data, lda_para, r_filt, n_trial_max)
d_data.lda_trial_type = cfcn.get_glob_para('lda_trial_type')
# sets the phase offset/duration
d_data.tofs = t_ofs
d_data.tphase = t_phase
d_data.usefull = calc_para['use_full_rot']
# returns a true value indicating the calculations were successful
return True
def run_pooled_lda(self, pool, data, calc_para, r_filt, i_expt, i_cell, n_trial_max):
'''
:param pool:
:param data:
:param calc_para:
:param r_filt:
:param i_expt:
:param i_cell:
:param n_trial_max:
:return:
'''
def run_pooled_lda_expt(data, calc_para, r_filt, i_expt0, i_cell0, n_trial_max, n_cell, n_sp0):
'''
:param data:
:param calc_para:
:param r_filt:
:param i_expt0:
:param i_cell0:
:param n_trial_max:
:param n_cell:
:param n_sp0:
:return:
'''
while 1:
# sets the required number of cells for the LDA analysis
if calc_para['pool_expt']:
n_sp = n_sp0[:, np.random.permutation(np.size(n_sp0, axis=1))[:n_cell]]
i_cell, i_expt = i_cell0, i_expt0
else:
i_cell = dcopy(i_cell0)
is_keep = np.ones(len(i_expt0), dtype=bool)
for i_ex in range(len(i_expt0)):
# determines the original valid cells for the current experiment
ii = np.where(i_cell0[i_ex])[0]
if len(ii) < n_cell:
is_keep[i_ex] = False
continue
# from these cells, set n_cell cells as being valid (for analysis purposes)
i_cell[i_ex][:] = False
i_cell[i_ex][ii[np.random.permutation(len(ii))][:n_cell]] = True
# removes the experiments which did not have the min number of cells
i_expt, i_cell, n_sp = i_expt0[is_keep], i_cell[is_keep], n_sp0
# runs the LDA
results = cfcn.run_rot_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max, n_sp0=n_sp)
if not isinstance(results, bool):
# if successful, then exit the loop
break
# returns the decoding accuracy values
if calc_para['pool_expt']:
return results[1]
else:
# retrieves the results from the LDA
y_acc0 = results[1]
# sets the values into the full-sized accuracy array (NaN rows for removed experiments)
y_acc = np.nan * np.ones((len(is_keep), np.size(y_acc0, axis=1)))
y_acc[is_keep, :] = y_acc0
return y_acc
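# NOTE: a minimal illustrative sketch of the per-experiment cell sub-sampling used above
# (standalone; the acceptance mask and target count are hypothetical):
#
#   import numpy as np
#   i_cell0, n_cell = np.array([1, 0, 1, 1, 1, 0, 1], dtype=bool), 3
#   ii = np.where(i_cell0)[0]
#   i_cell = np.zeros_like(i_cell0)
#   i_cell[ii[np.random.permutation(len(ii))][:n_cell]] = True
#   # -> exactly n_cell of the originally-valid cells remain flagged for analysis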
# initialisations
d_data = data.discrim.part
w_prog, n_sp = self.work_progress, None
#############################################
#### PARTIAL CELL LDA CALCULATIONS ####
#############################################
# initialisations
if calc_para['pool_expt']:
# case is all experiments are pooled
# initialisations and memory allocation
ind_t, n_sp = np.arange(n_trial_max), []
t_ofs, t_phase = cfcn.get_rot_phase_offsets(calc_para)
# creates a reduce data object and creates the rotation filter object
data_tmp = cfcn.reduce_cluster_data(data, i_expt, True)
r_obj = RotationFilteredData(data_tmp, r_filt, None, None, True, 'Whole Experiment', False,
t_ofs=t_ofs, t_phase=t_phase)
# sets up the LDA data/group index arrays across each condition
for i_filt in range(r_obj.n_filt):
# retrieves the time spikes for the current filter/experiment, and then combines into a single
# concatenated array. calculates the final spike counts over each cell/trial and appends to the
# overall spike count array
A = dcopy(r_obj.t_spike[i_filt])[:, ind_t, :]
if r_obj.rot_filt['t_type'][i_filt] == 'MotorDrifting':
# case is motordrifting (swap phases)
t_sp_tmp = np.hstack((A[:, :, 2], A[:, :, 1]))
else:
# case is other experiment conditions
t_sp_tmp = np.hstack((A[:, :, 1], A[:, :, 2]))
# calculates the spike counts and appends them to the count array
n_sp.append(np.vstack([np.array([len(y) for y in x]) for x in t_sp_tmp]))
# combines the spike counts/group indices into the final combined arrays
n_sp, n_expt, i_expt_lda = np.hstack(n_sp).T, 1, np.array([i_expt[0]])
xi = cfcn.get_pool_cell_counts(data, calc_para['lda_para'], 1)
# reduces the cells to the selected cell type
_, _, i_cell0, _, _ = cfcn.setup_lda(data, {'lda_para': calc_para['lda_para']}, None)
n_sp = n_sp[:, np.hstack(i_cell0)]
i_cell = np.array([np.ones(np.size(n_sp, axis=1), dtype=bool)])
else:
# case is experiments are not pooled
# initialisations
# y_acc_d, n_expt = data.discrim.dir.y_acc, min([3, len(i_expt)])
y_acc_d, n_expt, i_expt_lda = data.discrim.dir.y_acc, len(i_expt), i_expt
# # retrieves the top n_expt experiments based on the base decoding accuracy
# ii = np.sort(np.argsort(-np.prod(y_acc_d, axis=1))[:n_expt])
# i_expt, i_cell = i_expt[ii], i_cell[ii]
# determines the candidate cell counts (capped by the maximum cell count over all valid experiments)
n_cell_max = np.max([sum(x) for x in i_cell])
xi = [x for x in cfcn.n_cell_pool1 if x <= n_cell_max]
# memory allocation
n_xi, n_sh, n_cond = len(xi), calc_para['n_shuffle'], len(r_filt['t_type'])
d_data.y_acc = np.zeros((n_expt, n_cond + 1, n_xi, n_sh))
# loops through each of the cell counts calculating the partial LDA
for i_sh in range(n_sh):
# updates the progressbar
w_str = 'Pooling LDA Calculations (Shuffle {0} of {1})'.format(i_sh + 1, n_sh)
w_prog.emit(w_str, 100. * (i_sh / n_sh))
# # runs the analysis based on the operating system
# if 'Windows' in platform.platform():
# # case is Richard's local computer
#
# # initialisations and memory allocation
# p_data = [[] for _ in range(n_xi)]
# for i_xi in range(n_xi):
# p_data[i_xi].append(data)
# p_data[i_xi].append(calc_para)
# p_data[i_xi].append(r_filt)
# p_data[i_xi].append(i_expt)
# p_data[i_xi].append(i_cell)
# p_data[i_xi].append(n_trial_max)
# p_data[i_xi].append(xi[i_xi])
#
# # runs the pool object to run the partial LDA
# p_results = pool.map(cfcn.run_part_lda_pool, p_data)
# for i_xi in range(n_xi):
# j_xi = xi.index(p_results[i_xi][0])
# d_data.y_acc[:, :, j_xi, i_sh] = p_results[i_xi][1]
# else:
# case is Subiculum
# initialisations and memory allocation
for i_xi in range(n_xi):
d_data.y_acc[:, :, i_xi, i_sh] = run_pooled_lda_expt(
data, calc_para, r_filt, i_expt_lda, dcopy(i_cell), n_trial_max, xi[i_xi], dcopy(n_sp)
)
#######################################
#### HOUSE KEEPING EXERCISES ####
#######################################
# retrieves the LDA solver parameter fields
lda_para = calc_para['lda_para']
t_ofs, t_phase = cfcn.get_rot_phase_offsets(calc_para)
# sets the solver parameters
d_data.lda = 1
d_data.i_expt = i_expt
d_data.i_cell = i_cell
cfcn.set_lda_para(d_data, lda_para, r_filt, n_trial_max, ignore_list=['n_cell_min'])
d_data.lda_trial_type = cfcn.get_glob_para('lda_trial_type')
# sets the phase offset/duration parameters
d_data.tofs = t_ofs
d_data.tphase = t_phase
d_data.usefull = calc_para['use_full_rot']
# sets the other parameters/arrays
d_data.nshuffle = n_sh
d_data.poolexpt = calc_para['pool_expt']
d_data.xi = xi
# returns a true value indicating the calculations were successful
return True
def run_wght_lda(self, data, calc_para, r_filt, i_expt, i_cell, n_trial_max):
'''
:param data:
:param calc_para:
:param r_filt:
:param i_expt:
:param i_cell:
:param n_trial_max:
:return:
'''
# initialisations and memory allocation
d_data, w_prog = data.discrim.wght, self.work_progress
if d_data.lda is not None:
# if no change, then exit flagging the calculations are already done
return True
else:
lda_para = calc_para['lda_para']
#######################################
#### LDA WEIGHT CALCULATIONS ####
#######################################
# initialisations
t_ofs, t_phase = cfcn.get_rot_phase_offsets(calc_para)
n_ex, n_tt, n_t, _r_filt = len(i_expt), len(r_filt['t_type']), dcopy(n_trial_max), dcopy(r_filt)
p_wt, p_wex, xi = 1 / n_tt, 1 / n_ex, np.linspace(0, 1, 101)
p_w = p_wt * p_wex
# memory allocation
A, B, C = np.empty((n_ex, n_tt), dtype=object), np.empty(n_ex, dtype=object), np.empty(n_tt, dtype=object)
c_ind, c_wght0 = dcopy(A), dcopy(A)
c_wght, y_top, y_bot = dcopy(C), dcopy(C), dcopy(C)
# reduces down the data cluster to the valid experiments
data_tmp = cfcn.reduce_cluster_data(data, i_expt, True)
# sets the LDA solver type
lda = cfcn.setup_lda_solver(lda_para)
# creates a reduce data object and creates the rotation filter object
for i_tt, tt in enumerate(r_filt['t_type']):
# sets the rotation filter for the current trial type
_r_filt['t_type'] = [tt]
r_obj = RotationFilteredData(data_tmp, _r_filt, None, None, True, 'Whole Experiment', False,
t_ofs=t_ofs, t_phase=t_phase)
# memory allocation
y_acc_bot, y_acc_top, c_wght_ex = dcopy(B), dcopy(B), dcopy(B)
# calculates the cell weight scores for each experiment
for i_ex in range(n_ex):
# updates the progress bar
w_str = 'Weighting LDA ({0}, Expt {1}/{2})'.format(tt, i_ex + 1, n_ex)
p_w0 = p_wt * (i_tt + p_wex * i_ex)
# retrieves the spike counts for the current experiment
n_sp, i_grp = cfcn.setup_lda_spike_counts(r_obj, i_cell[i_ex], i_ex, n_t, return_all=False)
try:
# normalises the spike counts and fits the lda model
n_sp_norm = cfcn.norm_spike_counts(n_sp, 2 * n_t, lda_para['is_norm'])
lda.fit(n_sp_norm, i_grp)
except:
if w_prog is not None:
e_str = 'There was an error running the LDA analysis with the current solver parameters. ' \
'Either choose a different solver or alter the solver parameters before retrying'
w_prog.emit(e_str, 'LDA Analysis Error')
return False
# retrieves the coefficients from the LDA solver
coef0 = dcopy(lda.coef_)
coef0 /= np.max(np.abs(coef0))
# sets the sorting indices and re-orders the weights
c_ind[i_ex, i_tt] = np.argsort(-np.abs(coef0))[0]
c_wght0[i_ex, i_tt] = coef0[0, c_ind[i_ex, i_tt]]
n_sp = n_sp[:, c_ind[i_ex, i_tt]]
# calculates the top/bottom removed cells lda performance
y_acc_bot[i_ex] = cfcn.run_reducing_cell_lda(w_prog, lda, lda_para, n_sp, i_grp, p_w0, p_w/2, w_str, True)
y_acc_top[i_ex] = cfcn.run_reducing_cell_lda(w_prog, lda, lda_para, n_sp, i_grp, p_w0+p_w/2, p_w/2, w_str)
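# NOTE: a minimal illustrative sketch of the coefficient ranking step above (standalone,
# scikit-learn LDA; data are hypothetical):
#
#   import numpy as np
#   from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
#   X, y = np.random.randn(40, 6), np.repeat([0, 1], 20)
#   lda = LinearDiscriminantAnalysis().fit(X, y)
#   coef = lda.coef_ / np.max(np.abs(lda.coef_))   # normalise to unit maximum
#   i_srt = np.argsort(-np.abs(coef))[0]           # cells ordered by |weight|, descending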
# calculates the interpolated bottom/top removed values
c_wght[i_tt] = interp_arr(xi, np.abs(c_wght0[:, i_tt]))
y_bot[i_tt], y_top[i_tt] = interp_arr(xi, y_acc_bot), interp_arr(xi, y_acc_top)
#######################################
#### HOUSE KEEPING EXERCISES ####
#######################################
# sets the solver parameters
d_data.lda = 1
d_data.i_expt = i_expt
d_data.i_cell = i_cell
cfcn.set_lda_para(d_data, lda_para, r_filt, n_trial_max)
d_data.lda_trial_type = cfcn.get_glob_para('lda_trial_type')
# sets the phase offset/duration parameters
d_data.tofs = t_ofs
d_data.tphase = t_phase
d_data.usefull = calc_para['use_full_rot']
# sets the other parameters
d_data.xi = xi
d_data.c_ind = c_ind
d_data.c_wght = c_wght
d_data.c_wght0 = c_wght0
d_data.y_acc_bot = y_bot
d_data.y_acc_top = y_top
# return the calculations were a success
return True
##########################################
#### KINEMATIC LDA CALCULATIONS ####
##########################################
def run_speed_lda_accuracy(self, data, calc_para, r_filt, i_expt, i_cell, n_trial, w_prog):
'''
:param data:
:param calc_para:
:param r_filt:
:param i_expt:
:param i_cell:
:param n_trial:
:param w_prog:
:return:
'''
# initialisations
d_data = data.discrim.spdacc
# reduces down the cluster data array
_data = cfcn.reduce_cluster_data(data, i_expt, True)
# sets up the kinematic LDA spiking frequency array
w_prog.emit('Setting Up LDA Spiking Frequencies...', 0.)
spd_sf, _r_filt = cfcn.setup_kinematic_lda_sf(_data, r_filt, calc_para, i_cell, n_trial, w_prog)
# case is the normal kinematic LDA
if not cfcn.run_full_kinematic_lda(_data, dcopy(spd_sf), calc_para, _r_filt, n_trial, w_prog, d_data):
# if there was an error then exit with a false flag
return False
#######################################
#### HOUSE-KEEPING EXERCISES ####
#######################################
# sets the lda values
d_data.i_expt = i_expt
d_data.i_cell = i_cell
# returns a true value indicating success
return True
def run_kinematic_lda(self, data, calc_para, r_filt, i_expt, i_cell, n_trial, w_prog):
'''
:param data:
:param calc_para:
:param r_filt:
:param i_expt:
:param i_cell:
:param n_trial:
:param w_prog:
:return:
'''
# initialisations
d_data = data.discrim.spdc
# reduces down the cluster data array
_data = cfcn.reduce_cluster_data(data, i_expt, True)
# sets up the kinematic LDA spiking frequency array
w_prog.emit('Setting Up LDA Spiking Frequencies...', 0.)
spd_sf, _r_filt = cfcn.setup_kinematic_lda_sf(_data, r_filt, calc_para, i_cell, n_trial, w_prog)
# case is the normal kinematic LDA
if not cfcn.run_kinematic_lda(_data, spd_sf, calc_para, _r_filt, n_trial, w_prog=w_prog, d_data=d_data):
# if there was an error then exit with a false flag
return False
#######################################
#### HOUSE-KEEPING EXERCISES ####
#######################################
# sets the lda values
d_data.i_expt = i_expt
d_data.i_cell = i_cell
# returns a true value indicating success
return True
def run_pooled_kinematic_lda(self, data, calc_para, r_filt, i_expt, i_cell, n_trial, w_prog, r_data_type='rotation'):
'''
:param data:
:param calc_para:
:param r_filt:
:param i_expt:
:param i_cell:
:param n_trial:
:param w_prog:
:param r_data_type:
:return:
'''
# initialisations
d_data = data.discrim.spdcp
tt, lda_para, n_shuff = r_filt['t_type'], calc_para['lda_para'], calc_para['n_shuffle']
###########################################
#### PRE-PROCESSING CALCULATIONS ####
###########################################
# reduces down the cluster data array
_data = cfcn.reduce_cluster_data(data, i_expt, True)
# sets up the kinematic LDA spiking frequency array
w_prog.emit('Setting Up LDA Spiking Frequencies...', 0.)
spd_sf, _r_filt = cfcn.setup_kinematic_lda_sf(_data, r_filt, calc_para, i_cell, n_trial,
w_prog, is_pooled=calc_para['pool_expt'])
##############################################
#### POOLED NEURON LDA CALCULATIONS ####
##############################################
# retrieves the rotation data class
r_data = _data.rotation
# determines the cell pool groupings
if calc_para['pool_expt']:
n_cell, is_keep = cfcn.get_pool_cell_counts(data, lda_para), []
else:
n_cell_ex = [sum(x) for x in i_cell]
n_cell = [x for x in cfcn.n_cell_pool1 if x <= np.max(n_cell_ex)]
# memory allocation
n_cell_pool = n_cell[-1]
n_ex = 1 if calc_para['pool_expt'] else len(i_cell)
nC, n_tt, n_xi = len(n_cell), len(tt), len(r_data.spd_xi)
y_acc = [np.nan * np.ones((n_shuff, n_xi, nC, n_ex)) for _ in range(n_tt)]
# loops through each of the cell counts calculating the pooled LDA
for i_c, n_c in enumerate(n_cell):
n_shuff_nw = n_shuff if (((i_c + 1) < nC) or (not calc_para['pool_expt'])) else 1
for i_s in range(n_shuff_nw):
# updates the progressbar
w_str = 'Speed LDA (G:{0}/{1}, Sh:{2}/{3})'.format(i_c + 1, nC, i_s + 1, n_shuff_nw)
pw0 = 100. * (i_c + (i_s / n_shuff_nw)) / nC
while 1:
# sets the new shuffled spiking frequency array (over all expt)
if calc_para['pool_expt']:
# case is all cells are pooled over all experiments
spd_sf_sh = [set_sf_cell_perm(dcopy(spd_sf), n_cell_pool, n_c)]
else:
# case is the cells are sub-sampled within each experiment
is_keep = np.array(n_cell_ex) >= n_c
spd_sf_sh = [set_sf_cell_perm(x, n_ex, n_c) for x, n_ex, is_k in
zip(dcopy(spd_sf), n_cell_ex, is_keep) if is_k]
# runs the kinematic LDA on the new data
n_ex_sh = 1 if calc_para['pool_expt'] else sum(is_keep)
results = cfcn.run_kinematic_lda(_data, spd_sf_sh, calc_para, _r_filt, n_trial, w_prog=w_prog,
w_str0=w_str, pw0=pw0)
if not isinstance(results, bool):
# if successful, then retrieve the accuracy values
for i_tt in range(n_tt):
for i_ex in range(n_ex_sh):
y_acc[i_tt][i_s, :, i_c, i_ex] = results[0][i_ex, :, i_tt]
# exits the loop
break
#######################################
#### HOUSE-KEEPING EXERCISES ####
#######################################
# sets a copy of the lda parameters and updates the comparison conditions
_lda_para = dcopy(lda_para)
_lda_para['comp_cond'] = r_data.r_obj_kine.rot_filt['t_type']
# sets the lda values
d_data.lda = 1
d_data.y_acc = y_acc
d_data.i_expt = i_expt
d_data.i_cell = i_cell
d_data.n_cell = n_cell
d_data.exp_name = [os.path.splitext(os.path.basename(x['expFile']))[0] for x in _data.cluster]
d_data.lda_trial_type = cfcn.get_glob_para('lda_trial_type')
# sets the rotation values
d_data.spd_xi = r_data.spd_xi
d_data.i_bin_spd = r_data.i_bin_spd
# sets the solver parameters
cfcn.set_lda_para(d_data, _lda_para, r_filt, n_trial)
# sets the phase duration/offset parameters
d_data.spd_xrng = calc_para['spd_x_rng']
d_data.vel_bin = calc_para['vel_bin']
d_data.n_sample = calc_para['n_sample']
d_data.equal_time = calc_para['equal_time']
d_data.nshuffle = calc_para['n_shuffle']
d_data.poolexpt = calc_para['pool_expt']
# returns a true value indicating success
return True
def run_speed_dir_lda_accuracy(self, data, calc_para, r_filt, i_expt, i_cell, n_trial, w_prog):
'''
:param data:
:param calc_para:
:param r_filt:
:param i_expt:
:param i_cell:
:param n_trial:
:param w_prog:
:return:
'''
# initialisations
d_data = data.discrim.spddir
# reduces down the cluster data array
_data = cfcn.reduce_cluster_data(data, i_expt, True)
# sets up the kinematic LDA spiking frequency array
w_prog.emit('Setting Up LDA Spiking Frequencies...', 0.)
vel_sf, _r_filt = cfcn.setup_kinematic_lda_sf(_data, r_filt, calc_para, i_cell, n_trial, w_prog, use_spd=False)
# case is the normal kinematic LDA
if not cfcn.run_vel_dir_lda(_data, dcopy(vel_sf), calc_para, _r_filt, n_trial, w_prog, d_data):
# if there was an error then exit with a false flag
return False
#######################################
#### HOUSE-KEEPING EXERCISES ####
#######################################
# sets the lda values
d_data.i_expt = i_expt
d_data.i_cell = i_cell
# returns a true value indicating success
return True
######################################
#### ROC CURVE CALCULATIONS ####
######################################
def calc_partial_roc_curves(self, data, calc_para, plot_para, pW, r_data=None):
'''
:param data:
:param calc_para:
:param plot_para:
:param pW:
:param r_data:
:return:
'''
# initialises the RotationData class object (if not provided)
if r_data is None:
r_data = data.rotation
# memory allocation
r_data.part_roc, r_data.part_roc_xy, r_data.part_roc_auc = {}, {}, {}
# initialises the rotational filter (if not initialised already)
if plot_para['rot_filt'] is None:
plot_para['rot_filt'] = cf.init_rotation_filter_data(False)
# calculates the partial roc curves for each of the trial conditions
for tt in plot_para['rot_filt']['t_type']:
# if tt not in r_data.part_roc:
r_data.part_roc[tt], r_data.part_roc_xy[tt], r_data.part_roc_auc[tt] = \
self.calc_phase_roc_curves(data, calc_para, pW, t_type=tt, r_data=None)
def calc_phase_roc_curves(self, data, calc_para, pW, t_type=None, r_data=None):
'''
:param data:
:param calc_para:
:param pW:
:param t_type:
:param r_data:
:return:
'''
# parameters and initialisations
phase_str = ['CW/BL', 'CCW/BL', 'CCW/CW']
if r_data is None:
r_data = data.rotation
# if the black phase is calculated already, then exit the function
if (r_data.phase_roc is not None) and (t_type is None):
return
# retrieves the offset parameters
t_ofs, t_phase = cfcn.get_rot_phase_offsets(calc_para)
# sets up the black phase data filter and returns the time spikes
r_filt = cf.init_rotation_filter_data(False)
if t_type is None:
r_data.r_obj_black = r_obj = RotationFilteredData(data, r_filt, 0, None, True, 'Whole Experiment', False,
t_phase=t_phase, t_ofs=t_ofs)
else:
r_filt['t_type'] = [t_type]
r_obj = RotationFilteredData(data, r_filt, 0, None, True, 'Whole Experiment', False,
t_phase=t_phase, t_ofs=t_ofs)
# retrieves the time spikes and sets the roc class fields for update
t_spike = r_obj.t_spike[0]
# memory allocation
n_cell = np.size(t_spike, axis=0)
roc = np.empty((n_cell, len(phase_str)), dtype=object)
roc_xy = np.empty(n_cell, dtype=object)
roc_auc = np.ones((n_cell, len(phase_str)))
# calculates the roc curves/integrals for all cells over each phase
for i_phs, p_str in enumerate(phase_str):
# updates the progress bar string
w_str = 'ROC Curve Calculations ({0})...'.format(p_str)
self.work_progress.emit(w_str, pW * i_phs / len(phase_str))
# sets the t_spike column indices being compared for the current phase
ind = np.array([1 * (i_phs > 1), 1 + (i_phs > 0)])
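# NOTE: the resulting index pairs are [0, 1] (BL vs CW), [0, 2] (BL vs CCW) and [1, 2]
# (CW vs CCW) for i_phs = 0, 1, 2, i.e. the t_spike phase columns compared for each
# entry of phase_str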
for i_cell in range(n_cell):
# calculates the roc curve/auc integral
roc[i_cell, i_phs] = cf.calc_roc_curves(t_spike[i_cell, :, :], ind=ind)
roc_auc[i_cell, i_phs] = cf.get_roc_auc_value(roc[i_cell, i_phs])
# if the CW/CCW phase interaction, then set the roc curve x/y coordinates
if (i_phs + 1) == len(phase_str):
roc_xy[i_cell] = cf.get_roc_xy_values(roc[i_cell, i_phs])
# case is the rotation (black) condition
if t_type is None:
r_data.phase_roc, r_data.phase_roc_xy, r_data.phase_roc_auc = roc, roc_xy, roc_auc
else:
return roc, roc_xy, roc_auc
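# NOTE: cf.calc_roc_curves/cf.get_roc_auc_value are internal helpers; a minimal analogous
# sketch of an ROC/auc calculation on two spike-count samples (standalone, scikit-learn,
# hypothetical data) might be:
#
#   import numpy as np
#   from sklearn.metrics import roc_auc_score
#   n1, n2 = np.random.poisson(4, 50), np.random.poisson(6, 50)
#   y_true = np.concatenate((np.zeros(50), np.ones(50)))
#   auc = roc_auc_score(y_true, np.concatenate((n1, n2)))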
def calc_ud_roc_curves(self, data, r_obj_vis, ind_type, pW, r_data=None):
'''
:param data:
:param r_obj_vis:
:param ind_type:
:param pW:
:param r_data:
:return:
'''
# initialises the RotationData class object (if not provided)
if r_data is None:
r_data = data.rotation
# parameters and initialisations
t_spike = r_obj_vis.t_spike
phase_str, ind = ['CW/BL', 'CCW/BL', 'CCW/CW'], np.array([0, 1])
# array indexing values
n_filt = round(r_obj_vis.n_filt / 2)
n_trial = min([np.shape(x)[1] for x in t_spike])
n_cell_expt = [x['nC'] for x in np.array(data.cluster)[cf.det_valid_rotation_expt(data, is_ud=True)]]
n_cell = sum(n_cell_expt)
# sets up the global index arrays
i_ofs = np.concatenate(([0], np.cumsum(n_cell_expt[:-1])))
i_cell_g = [i0 + np.arange(nC) for i0, nC in zip(i_ofs, n_cell_expt) if nC > 0]
# if the uniformdrifting phase is calculated already, then exit the function
if r_data.phase_roc_ud is not None:
return
# memory allocation
roc = np.empty((n_cell, len(phase_str)), dtype=object)
roc_xy = np.empty(n_cell, dtype=object)
roc_auc = np.ones((n_cell, len(phase_str)))
for i_filt in range(n_filt):
# sets the time spike array and global cell indices array
ind_CC, ind_CCW = ind_type[0][i_filt], ind_type[1][i_filt]
ig_cell = cf.flat_list([ig[ind] for ig, ind in zip(i_cell_g, r_obj_vis.clust_ind[i_filt])])
# sets the number of cells to be analysed for the current filter
n_cell_f = np.shape(t_spike[ind_CC])[0]
# calculates the roc curves/integrals for all cells over each phase
for i_phs, p_str in enumerate(phase_str):
# updates the progress bar string
w_str = 'ROC Curve Calculations ({0})...'.format(p_str)
self.work_progress.emit(w_str, 100 * pW * ((i_filt / n_filt) + (i_phs / len(phase_str))))
# loops through each of the cells calculating the roc curves (and associated values)
for i_cell in range(n_cell_f):
# sets the time spike arrays depending on the phase type
if (i_phs + 1) == len(phase_str):
t_spike_phs = np.vstack((t_spike[ind_CC][i_cell, :n_trial, 1],
t_spike[ind_CCW][i_cell, :n_trial, 1])).T
else:
t_spike_phs = t_spike[ind_type[i_phs][i_filt]][i_cell, :, :]
# calculates the roc curve/auc integral
ig_nw = int(ig_cell[i_cell])
roc[ig_nw, i_phs] = cf.calc_roc_curves(t_spike_phs, ind=np.array([0, 1]))
roc_auc[ig_nw, i_phs] = cf.get_roc_auc_value(roc[ig_nw, i_phs])
# if the CW/CCW phase interaction, then set the roc curve x/y coordinates
if (i_phs + 1) == len(phase_str):
roc_xy[ig_nw] = cf.get_roc_xy_values(roc[ig_nw, i_phs])
# sets the final roc values into the rotation data class object
r_data.phase_roc_ud, r_data.phase_roc_xy_ud, r_data.phase_roc_auc_ud = roc, roc_xy, roc_auc
def calc_cond_roc_curves(self, data, pool, calc_para, plot_para, g_para, calc_cell_grp, pW,
force_black_calc=False, r_data=None):
'''
:param data:
:param pool:
:param calc_para:
:param plot_para:
:param g_para:
:param calc_cell_grp:
:param pW:
:param force_black_calc:
:param r_data:
:return:
'''
# initialises the RotationData class object (if not provided)
if r_data is None:
r_data = data.rotation
# parameters and initialisations
t_ofs, t_phase = cfcn.get_rot_phase_offsets(calc_para)
r_obj_sig, plot_scope, c_lvl = None, 'Whole Experiment', float(g_para['roc_clvl'])
phase_str = ['CW/BL', 'CCW/BL', 'CCW/CW']
# initialises the rotational filter (if not initialised already)
if plot_para['rot_filt'] is None:
plot_para['rot_filt'] = cf.init_rotation_filter_data(False)
# sets the condition types (ensures that the black phase is always included)
t_type = dcopy(plot_para['rot_filt']['t_type'])
if 'Black' not in t_type:
t_type = ['Black'] + t_type
if 'vis_expt_type' in calc_para:
if calc_para['vis_expt_type'] == 'MotorDrifting':
t_type += ['MotorDrifting']
# retrieves the rotation phase offset time/duration
if t_ofs is not None:
# if the values are not none, and do not match previous values, then reset the stored roc array
if (r_data.t_ofs_rot != t_ofs) or (r_data.t_phase_rot != t_phase):
r_data.t_ofs_rot, r_data.t_phase_rot, r_data.cond_roc = t_ofs, t_phase, None
elif 'use_full_rot' in calc_para:
# if using the full rotation, and the previous calculations were made using non-full rotation phases,
# then reset the stored roc array
if (r_data.t_ofs_rot > 0):
r_data.t_ofs_rot, r_data.t_phase_rot, r_data.cond_roc = -1, -1, None
# sets up a base filter with only the non-uniformdrifting trial types
r_filt_base = cf.init_rotation_filter_data(False)
r_filt_base['t_type'] = [x for x in t_type if x != 'UniformDrifting']
# sets up the black phase data filter and returns the time spikes
r_obj = RotationFilteredData(data, r_filt_base, None, plot_para['plot_exp_name'], True, plot_scope, False,
t_ofs=t_ofs, t_phase=t_phase)
if not r_obj.is_ok:
# if there was an error, then output an error to screen
self.work_error.emit(r_obj.e_str, 'Incorrect Analysis Function Parameters')
return False
# memory allocation (if the conditions have not been set)
if r_data.cond_roc is None:
r_data.cond_roc, r_data.cond_roc_xy, r_data.cond_roc_auc = {}, {}, {}
r_data.cond_gtype, r_data.cond_auc_sig, r_data.cond_i_expt, r_data.cond_cl_id = {}, {}, {}, {}
r_data.cond_ci_lo, r_data.cond_ci_hi, r_data.r_obj_cond = {}, {}, {}
r_data.phase_gtype, r_data.phase_auc_sig, r_data.phase_roc = None, None, None
for i_rr, rr in enumerate(r_obj.rot_filt_tot):
# sets the trial type
tt = rr['t_type'][0]
# updates the progress bar string
w_str = 'ROC Curve Calculations ({0})...'.format(tt)
self.work_progress.emit(w_str, pW * (i_rr / r_obj.n_filt))
if tt not in r_data.cond_roc:
# array dimensions
t_spike = r_obj.t_spike[i_rr]
n_cell = np.size(t_spike, axis=0)
# memory allocation and initialisations
r_data.cond_roc[tt] = np.empty((n_cell, 3), dtype=object)
r_data.cond_roc_xy[tt] = np.empty(n_cell, dtype=object)
r_data.cond_roc_auc[tt] = np.zeros((n_cell, 3))
r_data.cond_gtype[tt] = -np.ones((n_cell, 3))
r_data.cond_auc_sig[tt] = np.zeros((n_cell, 3), dtype=bool)
r_data.cond_i_expt[tt] = r_obj.i_expt[i_rr]
r_data.cond_cl_id[tt] = r_obj.cl_id[i_rr]
r_data.cond_ci_lo[tt] = -np.ones((n_cell, 2))
r_data.cond_ci_hi[tt] = -np.ones((n_cell, 2))
r_data.r_obj_cond[tt] = dcopy(r_obj)
# calculates the roc curves/integrals for all cells over each phase
for i_phs, p_str in enumerate(phase_str):
# updates the progress bar string
self.work_progress.emit(w_str, pW * ((i_rr / r_obj.n_filt) + (i_phs / len(phase_str))))
# calculates the roc curve values for each phase
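# (the index pair below encodes which two phases are compared by the roc curve:
#  i_phs = 0 gives (0, 1) (BL vs CW), i_phs = 1 gives (0, 2) (BL vs CCW), and
#  i_phs = 2 gives (1, 2) (CW vs CCW))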
ind = np.array([1 * (i_phs > 1), 1 + (i_phs > 0)])
for ic in range(n_cell):
r_data.cond_roc[tt][ic, i_phs] = cf.calc_roc_curves(t_spike[ic, :, :], ind=ind)
r_data.cond_roc_auc[tt][ic, i_phs] = cf.get_roc_auc_value(r_data.cond_roc[tt][ic, i_phs])
if (i_phs + 1) == len(phase_str):
r_data.cond_roc_xy[tt][ic] = cf.get_roc_xy_values(r_data.cond_roc[tt][ic, i_phs])
# calculates the confidence intervals for the current condition (only if the bootstrapping count has
# changed or the confidence intervals have not already been calculated)
if 'auc_stype' in calc_para:
# updates the auc statistics calculation type
r_data.cond_auc_stats_type = calc_para['auc_stype']
# determine if the auc confidence intervals need calculation
is_boot = int(calc_para['auc_stype'] == 'Bootstrapping')
if is_boot:
# if bootstrapping, then determine if the bootstrapping count has changed
if r_data.n_boot_cond_ci != calc_para['n_boot']:
# if the bootstrapping count has changed, flag that the confidence intervals needs updating
r_data.n_boot_cond_ci, calc_ci = calc_para['n_boot'], True
else:
# otherwise, recalculate the confidence intervals if they have not been set
calc_ci = np.any(r_data.cond_ci_lo[tt][:, 1] < 0)
else:
# otherwise, recalculate the confidence intervals if they have not been set
calc_ci = np.any(r_data.cond_ci_lo[tt][:, 0] < 0)
# calculates the confidence intervals (if required)
if calc_ci:
conf_int = self.calc_roc_conf_intervals(pool, r_data.cond_roc[tt][:, 2],
calc_para['auc_stype'], calc_para['n_boot'], c_lvl)
r_data.cond_ci_lo[tt][:, is_boot] = conf_int[:, 0]
r_data.cond_ci_hi[tt][:, is_boot] = conf_int[:, 1]
# if not calculating the cell group indices, or the condition type is Black (the phase statistics for
# this condition are already calculated in "calc_phase_roc_significance"), then continue
if (not calc_cell_grp) or ((tt == 'Black') and (not force_black_calc)):
continue
# sets the rotation object filter (if using wilcoxon paired test for the cell group stats type)
if calc_para['grp_stype'] == 'Wilcoxon Paired Test':
if np.all(r_data.cond_gtype[tt][:, 0] >= 0):
# if all the values have been calculated, then exit the function
continue
# sets the rotation object for the current condition
r_obj_sig = RotationFilteredData(data, r_obj.rot_filt_tot[i_rr], None, plot_para['plot_exp_name'],
True, plot_scope, False, t_ofs=t_ofs, t_phase=t_phase)
if not r_obj_sig.is_ok:
# if there was an error, then output an error to screen
self.work_error.emit(r_obj_sig.e_str, 'Incorrect Analysis Function Parameters')
return False
# calculates the condition cell group types
self.calc_phase_roc_significance(calc_para, g_para, data, pool, None, c_type='cond',
roc=r_data.cond_roc[tt], auc=r_data.cond_roc_auc[tt],
g_type=r_data.cond_gtype[tt], auc_sig=r_data.cond_auc_sig[tt],
r_obj=r_obj_sig)
# returns a true value
return True
def calc_phase_roc_significance(self, calc_para, g_para, data, pool, pW, c_type='phase',
roc=None, auc=None, g_type=None, auc_sig=None, r_obj=None, r_data=None):
'''
Calculates the phase roc curve significance statistics.

:param calc_para: the calculation parameter dictionary
:param data: the overall data container class object
:param pool: the multiprocessing worker pool object
:return:
'''
# initialises the RotationData class object (if not provided)
if r_data is None:
r_data = data.rotation
# sets the roc objects/integrals (if not provided)
c_lvl = float(g_para['roc_clvl'])
if c_type == 'phase':
# case is the significance tests are being calculated for the phase
r_data.phase_grp_stats_type = calc_para['grp_stype']
roc, auc, r_obj = r_data.phase_roc, r_data.phase_roc_auc, r_data.r_obj_black
else:
# case is the significance tests are being calculated for the conditions
r_data.cond_grp_stats_type = calc_para['grp_stype']
# parameters and initialisations
phase_str, i_col = ['CW/BL', 'CCW/BL', 'CCW/CW'], 0
p_value, n_cell = 0.05, np.size(roc, axis=0)
# allocates memory for the group-types (if not already calculated)
if c_type == 'phase':
# case is for the phase type
n_boot = r_data.n_boot_phase_grp
if r_data.phase_gtype is None:
# group type has not been set, so initialise the array
r_data.phase_gtype = g_type = -np.ones((n_cell, 3))
r_data.phase_auc_sig = auc_sig = np.zeros((n_cell, 3), dtype=bool)
else:
# otherwise, retrieve the currently stored array
g_type, auc_sig = r_data.phase_gtype, r_data.phase_auc_sig
else:
# case is for the condition type
n_boot = r_data.n_boot_cond_grp
#########################################
#### WILCOXON STATISTICAL TEST ####
#########################################
if calc_para['grp_stype'] == 'Wilcoxon Paired Test':
# if the statistics have already been calculated, then exit the function
if np.all(g_type[:, 0] >= 0):
return
# updates the progress bar string
if pW is not None:
self.work_progress.emit('Calculating Wilcoxon Stats...', pW + 25.)
# calculates the statistical significance between the phases
sp_f0, sp_f = cf.calc_phase_spike_freq(r_obj)
_, _, sf_stats, _ = cf.setup_spike_freq_plot_arrays(r_obj, sp_f0, sp_f, None)
# determines which cells are motion/direction sensitive
for i_phs in range(len(sf_stats)):
auc_sig[:, i_phs] = sf_stats[i_phs] < p_value
##########################################
#### ROC-BASED STATISTICAL TEST ####
##########################################
else:
# determines what kind of statistics are to be calculated
is_boot = calc_para['grp_stype'] == 'Bootstrapping'
i_col, phase_stype = 1 + is_boot, calc_para['grp_stype']
# if the statistics have been calculated for the selected type, then exit the function
if is_boot:
if np.all(g_type[:, 2] >= 0) and (calc_para['n_boot'] == n_boot):
# if bootstrapping is selected, but all values have been calculated and the bootstrapping values
# has not changed, then exit the function
return
else:
# otherwise, update the bootstrapping count
if c_type == 'phase':
r_data.n_boot_phase_grp = dcopy(calc_para['n_boot'])
else:
r_data.n_boot_cond_grp = dcopy(calc_para['n_boot'])
elif np.all(g_type[:, 1] >= 0):
# if delong significance is selected, and all values have been calculated, then exit the function
return
# calculates the significance for each phase
for i_phs, p_str in enumerate(phase_str):
# updates the progress bar string
if pW is not None:
w_str = 'ROC Curve Calculations ({0})...'.format(p_str)
self.work_progress.emit(w_str, pW * (1. + i_phs / len(phase_str)))
# calculates the confidence intervals for the current phase
conf_int = self.calc_roc_conf_intervals(pool, roc[:, i_phs], phase_stype, n_boot, c_lvl)
# determines the significance for each cell in the phase
auc_ci_lo = (auc[:, i_phs] + conf_int[:, 1]) < 0.5
auc_ci_hi = (auc[:, i_phs] - conf_int[:, 0]) > 0.5
auc_sig[:, i_phs] = np.logical_or(auc_ci_lo, auc_ci_hi)
# calculates the cell group types
g_type[:, i_col] = cf.calc_cell_group_types(auc_sig, calc_para['grp_stype'])
def calc_dirsel_group_types(self, data, pool, calc_para, plot_para, g_para, r_data=None):
'''
Calculates the direction selectivity cell group types.

:param data: the overall data container class object
:param pool: the multiprocessing worker pool object
:param calc_para: the calculation parameter dictionary
:param plot_para: the plotting parameter dictionary
:return: True if the calculations completed successfully, False otherwise
'''
def calc_combined_spiking_stats(r_data, r_obj, pool, calc_para, g_para, p_value, ind_type=None,
t_type='Black'):
'''
:param r_obj:
:param ind_type:
:return:
'''
# calculates the individual trial/mean spiking rates and sets up the plot/stats arrays
sp_f0, sp_f = cf.calc_phase_spike_freq(r_obj)
s_plt, _, sf_stats, i_grp = cf.setup_spike_freq_plot_arrays(r_obj, sp_f0, sp_f, ind_type)
# calculates the CW/CCW spiking frequency ratio
r_CCW_CW = np.array(s_plt[2][1]) / np.array(s_plt[2][0])
#########################################
#### WILCOXON STATISTICAL TEST ####
#########################################
if calc_para['grp_stype'] == 'Wilcoxon Paired Test':
# case is the wilcoxon paired test
sf_scores = cf.calc_ms_scores(s_plt, sf_stats, p_value)
##########################################
#### ROC-BASED STATISTICAL TEST ####
##########################################
else:
# determines what kind of statistics are to be calculated
phase_stype = calc_para['grp_stype']
is_boot, n_boot = calc_para['grp_stype'] == 'Bootstrapping', calc_para['n_boot']
phase_str, c_lvl, pW = ['CW/BL', 'CCW/BL', 'CCW/CW'], float(g_para['roc_clvl']), 100.
# retrieves the roc/auc fields (depending on the type)
if t_type == 'Black':
# case is the black (rotation) condition
roc, auc = r_data.phase_roc, r_data.phase_roc_auc
elif t_type == 'UniformDrifting':
# case is the uniformdrifting (visual) condition
roc, auc = r_data.phase_roc_ud, r_data.phase_roc_auc_ud
else:
# case is the motordrifting (visual) condition
roc, auc = r_data.cond_roc['MotorDrifting'], r_data.cond_roc_auc['MotorDrifting']
# REMOVE ME LATER?
c_lvl = 0.95
# updates the bootstrapping count (if bootstrapping is being used)
if is_boot:
r_data.n_boot_comb_grp = dcopy(calc_para['n_boot'])
# calculates the significance for each phase
auc_sig = np.zeros((np.size(roc, axis=0), 3), dtype=bool)
for i_phs, p_str in enumerate(phase_str):
# updates the progress bar string
if pW is not None:
w_str = 'ROC Curve Calculations ({0})...'.format(p_str)
self.work_progress.emit(w_str, pW * (i_phs / len(phase_str)))
# calculates the confidence intervals for the current phase
conf_int = self.calc_roc_conf_intervals(pool, roc[:, i_phs], phase_stype, n_boot, c_lvl)
# determines the significance for each cell in the phase
auc_ci_lo = (auc[:, i_phs] + conf_int[:, 1]) < 0.5
auc_ci_hi = (auc[:, i_phs] - conf_int[:, 0]) > 0.5
auc_sig[:, i_phs] = np.logical_or(auc_ci_lo, auc_ci_hi)
# calculates the motion sensitivity scores from the roc significance values
sf_scores = np.zeros((np.size(roc, axis=0), 3), dtype=int)
for ig in i_grp:
sf_scores[ig, :] = cf.calc_ms_scores(auc[ig, :], auc_sig[ig, :], None)
# returns the direction selectivity scores
return sf_scores, i_grp, r_CCW_CW
def det_dirsel_cells(sf_score, grp_stype):
'''
:param sf_score:
:return:
'''
# calculates the minimum/sum scores
if grp_stype == 'Wilcoxon Paired Test':
score_min, score_sum = np.min(sf_score[:, :2], axis=1), np.sum(sf_score[:, :2], axis=1)
# determines the direction selective cells, which must meet the following conditions:
# 1) one direction only produces a significant result, OR
# 2) both directions are significant AND the CW/CCW comparison is significant
one_dir_sig = np.logical_and(score_min == 0, score_sum > 0) # cells where one direction is significant
both_dir_sig = np.min(sf_score[:, :2], axis=1) > 0 # cells where both CW/CCW is significant
comb_dir_sig = sf_score[:, -1] > 0 # cells where CW/CCW difference is significant
# determines which cells are direction selective (removes non-motion sensitive cells)
return np.logical_or(one_dir_sig, np.logical_and(both_dir_sig, comb_dir_sig)).astype(int)
else:
# case is the roc analysis statistics (only consider the CW/CCW comparison for ds)
return sf_score[:, 2] > 0
# initialises the RotationData class object (if not provided)
if r_data is None:
r_data = data.rotation
# initialises the rotation filter (if not set)
rot_filt = plot_para['rot_filt']
if rot_filt is None:
rot_filt = cf.init_rotation_filter_data(False)
# sets the p-value
if 'p_value' in calc_para:
p_val = calc_para['p_value']
else:
p_val = 0.05
# initialisations and memory allocation
p_scope, n_grp, r_data, grp_stype = 'Whole Experiment', 4, r_data, calc_para['grp_stype']
# r_filt_rot, r_filt_vis = dcopy(rot_filt), dcopy(rot_filt)
plot_exp_name, plot_all_expt = plot_para['plot_exp_name'], plot_para['plot_all_expt']
r_data.ds_p_value = dcopy(p_val)
t_ofs_rot, t_phase_rot = cfcn.get_rot_phase_offsets(calc_para)
t_ofs_vis, t_phase_vis = cfcn.get_rot_phase_offsets(calc_para, True)
# determines what type of visual experiment is being used for comparison (if provided)
if 'vis_expt_type' in calc_para:
# case is a calculation parameter is set
ud_rot_expt = calc_para['vis_expt_type'] == 'UniformDrifting'
else:
# case is no calculation parameter is set, so use uniform drifting
ud_rot_expt = True
# sets up the black-only rotation filter object
r_filt_black = cf.init_rotation_filter_data(False)
r_obj_black = RotationFilteredData(data, r_filt_black, None, plot_exp_name, plot_all_expt, p_scope, False,
t_ofs=t_ofs_rot, t_phase=t_phase_rot)
# retrieves the rotational filtered data (black conditions only)
r_filt_rot = cf.init_rotation_filter_data(False)
r_data.r_obj_rot_ds = RotationFilteredData(data, r_filt_rot, None, plot_exp_name, plot_all_expt,
p_scope, False)
# retrieves the visual filtered data
r_filt_vis = cf.init_rotation_filter_data(True)
if ud_rot_expt:
# sets the visual phase/offset
if t_phase_vis is None:
# if the phase duration is not set
t_phase_vis, t_ofs_vis = 2., 0.
elif (t_phase_vis + t_ofs_vis) > 2:
# output an error to screen
e_str = 'The entered analysis duration and offset is greater than the experimental phase duration:\n\n' \
' * Analysis Duration + Offset = {0} s.\n * Experiment Phase Duration = {1} s.\n\n' \
'Enter a correct analysis duration/offset combination before re-running ' \
'the function.'.format(t_phase_vis + t_ofs_vis, 2.0)
self.work_error.emit(e_str, 'Incorrect Analysis Function Parameters')
# return a false value indicating the calculation is invalid
return False
# case is uniform-drifting experiments (split into CW/CCW phases)
r_filt_vis['t_type'], r_filt_vis['is_ud'], r_filt_vis['t_cycle'] = ['UniformDrifting'], [True], ['15']
r_data.r_obj_vis, ind_type = cf.split_unidrift_phases(data, r_filt_vis, None, plot_exp_name, plot_all_expt,
p_scope, t_phase_vis, t_ofs_vis)
if (r_data.phase_roc_ud is None) and ('Wilcoxon' not in calc_para['grp_stype']):
self.calc_ud_roc_curves(data, r_data.r_obj_vis, ind_type, 66.)
else:
# case is motor-drifting experiments
# retrieves the filtered data from the loaded datasets
r_filt_vis['t_type'], r_filt_vis['is_ud'], ind_type = ['MotorDrifting'], [False], None
t_ofs, t_phase = cfcn.get_rot_phase_offsets(calc_para, is_vis=True)
# runs the rotation filter
r_data.r_obj_vis = RotationFilteredData(data, r_filt_vis, None, plot_exp_name, plot_all_expt,
p_scope, False, t_ofs=t_ofs, t_phase=t_phase)
if not r_data.r_obj_vis.is_ok:
# if there was an error, then output an error to screen
self.work_error.emit(r_data.r_obj_vis.e_str, 'Incorrect Analysis Function Parameters')
return False
# calculate the visual/rotation stats scores
sf_score_rot, i_grp_rot, r_CCW_CW_rot = calc_combined_spiking_stats(r_data, r_data.r_obj_rot_ds, pool,
calc_para, g_para, p_val)
sf_score_vis, i_grp_vis, r_CCW_CW_vis = calc_combined_spiking_stats(r_data, r_data.r_obj_vis, pool,
calc_para, g_para, p_val, ind_type,
r_filt_vis['t_type'][0])
# memory allocation
ds_type_tmp, ms_type_tmp, pd_type_tmp = [], [], []
r_data.ms_gtype_N, r_data.ds_gtype_N, r_data.pd_type_N = [], [], []
A = np.empty(len(i_grp_rot), dtype=object)
r_data.ds_gtype_ex, r_data.ms_gtype_ex, r_data.pd_type_ex = dcopy(A), dcopy(A), dcopy(A)
r_data.ds_gtype_comb, r_data.ms_gtype_comb = dcopy(A), dcopy(A)
# reduces the arrays to the matching cells
for i in range(len(i_grp_rot)):
if len(i_grp_rot[i]):
# retrieves the matching rotation/visual indices
ind_rot, ind_vis = cf.det_cell_match_indices(r_data.r_obj_rot_ds, i, r_data.r_obj_vis)
# determines the motion sensitivity from the score phase types (append proportion/N-value arrays)
# 0 = None
# 1 = Rotation Only
# 2 = Visual Only
# 3 = Both
_sf_score_rot = sf_score_rot[i_grp_rot[i][ind_rot]][:, :-1]
_sf_score_vis = sf_score_vis[i_grp_vis[i][ind_vis]][:, :-1]
ms_gtype_comb = (np.sum(_sf_score_rot, axis=1) > 0) + 2 * (np.sum(_sf_score_vis, axis=1) > 0)
#! /usr/bin/env python3
import math
import numpy as np
import open3d as o3d
import sensor_msgs.point_cloud2 as pc2
class Utils(object):
@staticmethod
def convert_pointcloud2_msg_to_array(cloud_msg):
points_list = []
for data in pc2.read_points(cloud_msg, skip_nans=True):
points_list.append([data[0], data[1], data[2], data[3]])
return np.array(points_list)
@staticmethod
def convert_pose_stamped_msg_to_array(pose_msg):
position = np.array([pose_msg.pose.position.x, pose_msg.pose.position.y, pose_msg.pose.position.z])
orientation = np.array([pose_msg.pose.orientation.w, pose_msg.pose.orientation.x, pose_msg.pose.orientation.y, pose_msg.pose.orientation.z])
return position, orientation
@staticmethod
def convert_pos_quat_to_transformation(pos, quat):
R = o3d.geometry.get_rotation_matrix_from_quaternion(quat)
T = np.empty((4, 4))
T[0:3, 0:3] = R
T[0:3, 3] = pos
T[3, :] = [0, 0, 0, 1]
return T
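# Minimal usage sketch for the helper above (the pose values here are
# illustrative assumptions, not taken from any ROS message):
#
#   pos = np.array([1.0, 2.0, 3.0])
#   quat = np.array([1.0, 0.0, 0.0, 0.0])  # identity rotation, (w, x, y, z)
#   T = Utils.convert_pos_quat_to_transformation(pos, quat)
#   assert np.allclose(T[0:3, 3], pos)  # translation sits in the last column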
import numpy as np
import itertools
import pyglet
from pyglet.gl import *
from copy import deepcopy
from constraints import *
from collider import Collider, orth
from rigidbody import Rigidbody
from gameobject import GameObject
np.set_printoptions(edgeitems=30, linewidth=100000, formatter=dict(float=lambda x: "%.3g" % x))
class ConstraintScene():
def __init__(self, gravity=-981):
self.objects = []
self.pts = []
self.vecs = []
self.constraints = []
self.paused = False
self.gravity = np.array([0., gravity])
def add_game_object(self, obj):
self.objects.append(obj)
self.constraints = [PersistentContactConstraint(a, b) for a, b in itertools.combinations(self.objects, 2)]
def add_game_objects(self, objs):
self.objects.extend(objs)
self.constraints = [PersistentContactConstraint(a, b) for a, b in itertools.combinations(self.objects, 2)]
def run(self, frame_rate):
snapshot = deepcopy(self.objects)
config = Config(double_buffer=True, samples=4)
window = pyglet.window.Window(config=config, resizable=True)
fps_display = pyglet.window.FPSDisplay(window=window)
self.offset = np.array([0., 0])
@window.event
def on_key_press(key, modifiers):
if key == pyglet.window.key.E:
self.add_game_objects((GameObject(Collider.random_polygon(6, np.random.randint(
50, 101)), Rigidbody(1., 2e4, np.array([650., 800 + i * 300]), np.pi / 3)) for i in range(8)))
print(len(self.constraints))
elif key == pyglet.window.key.R:
self.offset = np.zeros(2)
self.objects = deepcopy(snapshot)
elif key == pyglet.window.key.P:
self.paused = not self.paused
@window.event
def on_draw():
window.clear()
fps_display.draw()
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnable(GL_BLEND)
glEnable(GL_LINE_SMOOTH)
glHint(GL_LINE_SMOOTH_HINT, GL_DONT_CARE)
glLineWidth(2)
glPointSize(3)
batch = pyglet.graphics.Batch()
for obj in self.objects:
glColor4f(1, 0, 0, 1)
v = obj.glVerts(self.offset)
batch.add(len(v) // 2, GL_LINES, None, ('v2f', v))
batch.draw()
glColor4f(1, 1, 1, 1)
batch = pyglet.graphics.Batch()
for p in self.pts:
batch.add(1, GL_POINTS, None, ('v2f', (p[0], p[1])))
batch.draw()
glColor4f(0, 1, 0, 1)
for v in self.vecs:
pyglet.graphics.draw(2, GL_LINES, ('v2f', (v[0][0], v[0][1], v[1][0], v[1][1])))
self.dt = 1. / frame_rate
pyglet.clock.schedule_interval(self.update, self.dt)
pyglet.app.run()
def solve_constraint(self, constraint, dt):
a, b, M, Js, qs = constraint.a, constraint.b, constraint.M, constraint.J, constraint.q
V = np.hstack([a.rb.v, a.rb.w, b.rb.v, b.rb.w])
dV = np.zeros(6)
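# (The remainder of solve_constraint is not shown in this excerpt. A generic
# velocity-level solve, given here only as a hedged sketch and not as the
# author's verbatim code, would accumulate an impulse per constraint row:
#   for J, q in zip(Js, qs):
#       lam = -(J @ (V + dV) + q) / (J @ np.linalg.inv(M) @ J.T)
#       dV += np.linalg.inv(M) @ J.T * lam
# with the resulting dV then applied back to the two bodies' velocities.)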
from __future__ import annotations
import copy
import math
import numpy as np
from matplotlib import animation
from matplotlib import pyplot as plt
MAX_LENGTH = 40
def get_plan() -> list[float]:
"""
TODO: make a plan, which is a list of 40 floats between -1.0 and 1.0.
- The order of nums roughly correlates with the timesteps of the simulation.
- You can call simulate_qwop(plan) to get the distance traveled using that plan.
"""
plan = [1 for _ in range(MAX_LENGTH)]
return plan
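# (For example, simulate_qwop(get_plan()) evaluates the all-ones plan defined
# above; per the type hint it returns a float score, presumably the distance
# travelled, together with the per-step frame data.)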
# ------------------- Do Not Edit Beyond This Point -------------------
# pylint: disable=too-many-statements
def simulate_qwop(plan: list[float]) -> tuple[float, list[np.ndarray]]:
"""Simulates the game of QWOP."""
if len(plan) > MAX_LENGTH:
raise RuntimeError(
f"Plan has length: {len(plan)}. Should have length {MAX_LENGTH}."
)
plan = np.clip(plan, -1.0, 1.0)
dt = 0.1
friction = 1.0
gravity = 0.1
mass = np.array([30, 10, 5, 10, 5, 10], dtype=float)
edgel = np.array([0.5, 0.5, 0.5, 0.5, 0.9], dtype=float)
edgesp = np.array([160.0, 180.0, 160.0, 180.0, 160.0], dtype=float)
edgef = np.array([8.0, 8.0, 8.0, 8.0, 8.0], dtype=float)
anglessp = np.array([20.0, 20.0, 10.0, 10.0], dtype=float)
anglesf = np.array([8.0, 8.0, 4.0, 4.0], dtype=float)
edge = np.array([[0, 1, 0, 3, 0], [1, 2, 3, 4, 5]], dtype=int)
angles = np.array([[4, 4, 0, 2], [0, 2, 1, 3]], dtype=int)
# vel and pos of the body parts, 0 is hip, 5 is head, others are joints
v = np.zeros((6, 2), dtype=float)
p = np.array(
[[0, 1], [0, 0.5], [-0.25, 0], [0.25, 0.5], [0.25, 0], [0.15, 1.9]], dtype=float
)
spin = 0.0
maxspin = 0.0
lastang = 0.0
data = []
for i in range(200):
# This is equivalent to the nested loop:
# for j in range(0, 40, 2):
#     for k in range(10): (k = i - j * 5)
j = (i // 10) * 2
lamb = 0.1 * (i - j * 5) + 0.05
t0 = ((plan[j - 2] if j > 0 else 0.5) * (1 - lamb)) + (plan[j] * lamb)
t1 = ((plan[j - 1] if j > 0 else 0.0) * (1 - lamb)) + (plan[j + 1] * lamb)
contact = p.T[1] <= 0
clipped_p = np.clip(p.T[1], 0, None)
if (clipped_p != p.T[1]).any():
spin = 0
p.T[1] = clipped_p
anglesl = [-(2.8 + t0), -(2.8 - t0), -(1 - t1) * 0.9, -(1 + t1) * 0.9]
disp = p[edge[1]] - p[edge[0]] # (5, 2)
dist = np.sqrt(disp.T[0] ** 2 + disp.T[1] ** 2) + 0.01 # (5, )
dispn = disp.T / dist.T # (5, 2)
dispv = v[edge[1]] - v[edge[0]] # (5, 2)
distv = 2 * np.sum(disp * dispv, axis=1) # (5,)
# Array broadcasting: (5,) * (5, 2) -> (5, 2)
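# spring force along each edge: Hooke's law about the rest length edgel with
# stiffness edgesp, plus a damping term proportional to distv with coefficient edgef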
forceedge = (((edgel - dist) * edgesp - distv * edgef) * dispn).T
edgeang = np.arctan2(disp.T[1], disp.T[0]) # (5,)
edgeangv = (dispv.T[0] * disp.T[1] - dispv.T[1] * disp.T[0]) / (dist ** 2)
# (5, 2)
spin += normalize_angle(edgeang[4] - lastang)
spinc = spin - 0.005 * i
if spinc > maxspin:
maxspin = spinc
lastang = edgeang[4]
angv = edgeangv[angles[1]] - edgeangv[angles[0]]
# (4, )
angf = normalize_angle_array(
edgeang[angles[1]] - edgeang[angles[0]] - anglesl
) * anglessp - angv * anglesf * np.min(dist[angles] / edgel[angles], axis=0)
from __future__ import print_function, division
import numpy as np
from pyscf import lib
def polariz_inter_ave(mf, gto, tddft, comega):
gto.set_common_orig((0.0,0.0,0.0))
ao_dip = gto.intor_symmetric('int1e_r', comp=3)
occidx = np.where(mf.mo_occ==2)[0]
viridx = np.where(mf.mo_occ==0)[0]
mo_coeff = mf.mo_coeff
mo_energy = mf.mo_energy
orbv,orbo = mo_coeff[:,viridx], mo_coeff[:,occidx]
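# transforms the AO dipole integrals into the virtual-occupied (particle-hole)
# MO block: first contract the AO row index with orbv, then the AO column
# index with the occupied orbitals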
vo_dip = np.einsum('cmb,bn->cmn', np.einsum('am,cab->cmb', orbv, ao_dip), orbo)
#!/usr/bin/env python
# coding: utf-8
# DO NOT EDIT
# Autogenerated from the notebook rolling_ls.ipynb.
# Edit the notebook and then sync the output with this file.
#
# flake8: noqa
# DO NOT EDIT
# # Rolling Regression
#
# Rolling OLS applies OLS across a fixed windows of observations and then
# rolls
# (moves or slides) the window across the data set. The key parameter is
# `window`
# which determines the number of observations used in each OLS regression.
# By
# default, `RollingOLS` drops missing values in the window and so will
# estimate
# the model using the available data points.
#
# Estimated values are aligned so that models estimated using data points
# $i+1, i+2, ... i+window$ are stored in location $i+window$.
#
# Start by importing the modules that are used in this notebook.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas_datareader as pdr
import seaborn
import statsmodels.api as sm
from statsmodels.regression.rolling import RollingOLS
seaborn.set_style("darkgrid")
pd.plotting.register_matplotlib_converters()
# `pandas-datareader` is used to download data from
# [Ken French's website](https://mba.tuck.dartmouth.edu/pages/faculty/ken.
# french/data_library.html).
# The two data sets downloaded are the 3 Fama-French factors and the 10
# industry portfolios.
# Data is available from 1926.
#
# The data are monthly returns for the factors or industry portfolios.
factors = pdr.get_data_famafrench("F-F_Research_Data_Factors",
start="1-1-1926")[0]
factors.head()
industries = pdr.get_data_famafrench("10_Industry_Portfolios",
start="1-1-1926")[0]
industries.head()
# The first model estimated is a rolling version of the CAPM that
# regresses
# the excess return of Technology sector firms on the excess return of the
# market.
#
# The window is 60 months, and so results are available after the first 60
# (`window`)
# months. The first 59 (`window - 1`) estimates are all `nan` filled.
endog = industries.HiTec - factors.RF.values
exog = sm.add_constant(factors["Mkt-RF"])
rols = RollingOLS(endog, exog, window=60)
rres = rols.fit()
params = rres.params.copy()
params.index = np.arange(1, params.shape[0] + 1)
# Copyright 2021 The Distla Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Utility functions for distributed matrices across ASIC slices.
`pops` contains utility functions for manipulating matrices distributed across
ASIC slices.
Functions `distribute` and `undistribute` map data stored in host memory
to a ShardedDeviceArray representing the same data distributed across all
connected ASIC cores. The result is a ShardedDeviceArray of shape
(# of processors, rows per processor, columns per processor). pmapping that
array over its first axis then assigns each core a contiguous matrix block.
The processor distribution for NHROWS = NHCOLS = NDROW = NDCOLS = 2 is as
follows:
0 2 8 10
1 3 9 11
4 6 12 14
5 7 13 15
"""
import functools
from typing import Tuple, Sequence, Union
import warnings
import jax
from jax.interpreters import pxla
import jax.numpy as jnp
import numbers
import numpy as np
from distla_core.utils import misc
from distla_core.utils import config
################################################################################
# CONFIGURATION
################################################################################
NHROWS = config.NHROWS
NHCOLS = config.NHCOLS
NDROWS = config.NDROWS
NDCOLS = config.NDCOLS
AXIS_NAME = config.get_axis_name()
NROWS = config.NROWS
NCOLS = config.NCOLS
NDPROCS = config.NDPROCS
NPROCS = config.NPROCS
GRID = config.GRID
DGRID = config.DGRID
HGRID = config.HGRID
EF57_DTYPES = (jnp.float64, jnp.complex128)
################################################################################
# UTILITIES
################################################################################
def pmap(f, *args, **kwargs):
return jax.pmap(f, *args, axis_name=AXIS_NAME, **kwargs)
def padded_local_ncols(logical_ncols):
if logical_ncols < 1:
raise ValueError(f"Invalid logical_ncols={logical_ncols}.")
largest_proc_dim = max(GRID)
pad_size = misc.distance_to_next_divisor(logical_ncols, largest_proc_dim)
return (logical_ncols + pad_size) // NCOLS
def _get_all_to_all_axis_index_groups(grid_shape, sharded_axes):
grid = np.arange(int(np.prod(grid_shape))).reshape(grid_shape, order='C')
reduced_shape = [
grid_shape[s] for s in range(len(grid_shape)) if s not in sharded_axes
]
axis_index_groups = []
for i in np.ndindex(*reduced_shape):
slices = list(i)
for sharded_axis in sharded_axes:
slices.insert(sharded_axis, slice(0, grid_shape[sharded_axis]))
axis_index_groups.append(list(np.ravel(grid[tuple(slices)])))
return axis_index_groups
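# (e.g. for grid_shape = (2, 2) and sharded_axes = (0,), the grid is
# [[0, 1], [2, 3]] and the returned groups are [[0, 2], [1, 3]]: one group
# per position along the non-sharded axis)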
def _to_tuple(val):
if isinstance(val, numbers.Number):
return (val,)
return tuple(val)
def all_to_all(
array: pxla.ShardedDeviceArray,
sharded_axes: Union[int, Sequence[int]],
split_axes: Union[int, Sequence[int]],
concat_axes: Union[int, Sequence[int]],
grid_shape: Tuple[int],
):
"""
Swap pmapped axes `sharded_axes` with local axes `split_axes`, and place
them at the local positions `concat_axes`.
The global part of `array` is considered to be of shape `grid_shape`, and
the pmap-axis placement of each shard of the global array is in 'C' order,
i.e. shard `i` of the array (the `i`-th element of the pmapped axis) is
placed on position `np.ravel(grid)[i]` on the grid, with
`grid = np.arange(jax.device_count()).reshape(grid_shape, order='C')`.
`sharded_axis`, `split_axes` and `concat_axes` have be either ints, or
sequences of ints of identical length.
Note: Due to an XLA bug (https://github.com/google/jax/issues/5861) this
function currently only works properly in ASICs.
Args:
array: A sharded array.
sharded_axes: The sharded axes to be swapped with local axes.
split_axes: The local axes to be pmapped.
concat_axes: local axes positions where `sharded_axes`
should be placed after localizing them.
grid_shape: the processor grid shape.
Returns:
ShardedDeviceArray: The result of the operation.
"""
def ind_sort(sequence, inds):
return tuple([sequence[i] for i in inds])
sharded_axes = _to_tuple(sharded_axes)
split_axes = _to_tuple(split_axes)
concat_axes = _to_tuple(concat_axes)
if len(split_axes) != len(concat_axes):
raise ValueError(f"split_axes and concat_axes are of unequal "
f"length {len(split_axes)} and {len(concat_axes)}.")
if len(split_axes) != len(sharded_axes):
raise ValueError(f"split_axes and sharded_axes are of unequal "
f"length {len(split_axes)} and {len(sharded_axes)}.")
sharded_dims = np.asarray([grid_shape[a] for a in sharded_axes])
local_dims = np.asarray([array.shape[a] for a in split_axes])
if not np.all(sharded_dims == local_dims):
raise ValueError(f"dimensions {sharded_dims} of global axes "
f"do not match dimensions {local_dims} of "
f"the local axes")
# we first sort sharded_axes
inds = np.argsort(sharded_axes)
sharded_axes = ind_sort(sharded_axes, inds)
split_axes = ind_sort(split_axes, inds)
concat_axes = ind_sort(concat_axes, inds)
axis_index_groups = _get_all_to_all_axis_index_groups(grid_shape,
sharded_axes)
if len(split_axes) == 1:
# this case is already covered within jax
return jax.lax.all_to_all(
array,
axis_name=AXIS_NAME,
split_axis=split_axes[0],
concat_axis=concat_axes[0],
axis_index_groups=axis_index_groups,
)
# we move all split_axes to the left side of the array
# and combine them into a single dimension
# transpose
n_split = len(split_axes)
permarray = jnp.moveaxis(array, split_axes, tuple(range(n_split)))
# now reshape
permshape = permarray.shape
comb_permshape = (int(np.prod(permshape[:n_split])),) + permshape[n_split:]
permarray = permarray.reshape(comb_permshape)
#now we swap the local index 0 with `sharded_axes`
result = jax.lax.all_to_all(
permarray,
axis_name=AXIS_NAME,
split_axis=0,
concat_axis=0,
axis_index_groups=axis_index_groups,
)
# finally we split the swapped axes back into their original shapes
# and move them to their final positions.
final_shape = tuple([grid_shape[a] for a in sharded_axes
]) + comb_permshape[1:]
return jnp.moveaxis(
result.reshape(final_shape), tuple(range(len(sharded_axes))), concat_axes)
def distribution_type(array):
"""Returns the distribution pattern of a matrix.
The possible values are `"undistributed"` if it is a Jax or numpy array with
two indices; `"distributed"` if it is a `ShardedDeviceArray` with three
indices; and `"traced"` if it is a `DynamicJaxprTracer` with two indices.
Args:
array: The matrix.
Raises:
TypeError: If the distribution pattern is not one of the above.
Returns:
The distribution pattern of `array`.
"""
ndim = array.ndim
RegularArray = (jnp.DeviceArray, np.ndarray)
ShardedDeviceArray = jax.interpreters.pxla.ShardedDeviceArray
DynamicTracer = jax.interpreters.partial_eval.DynamicJaxprTracer
if isinstance(array, ShardedDeviceArray) and ndim == 3:
return "distributed"
if isinstance(array, DynamicTracer) and ndim == 2:
return "traced"
if isinstance(array, RegularArray) and ndim == 2:
return "undistributed"
msg = ("Don't know how to interpret the distribution pattern of a matrix of "
f"type {type(array)} with {ndim} indices.")
raise TypeError(msg)
###############################################################################
# SLICING
###############################################################################
def _in_range(rows, row_start, n_rows):
return jnp.logical_and(rows >= row_start, rows < row_start + n_rows)
def get_rows(matrix, row_start, n_rows, rows=None):
""" Extracts matrix[row_start:row_start + n_rows, :] from each processor
column. The result is replicated across processor columns.
Presently, n_rows must be less than the local
number of rows per panel, but this could probably be relaxed with
some effort.
Args:
matrix: ShardedDeviceArray to take from.
row_start: First row to take.
n_rows: Number of rows to take.
rows: Optionally previously-computed row indices.
Returns:
The (n_rows, matrix.shape[1]) panel, replicated across processor columns.
"""
m_l, _ = matrix.shape
if rows is None:
rows, _ = indices(matrix.shape)
if n_rows > m_l:
raise TypeError(
f"Cannot extract more rows {n_rows} than local rows {m_l}.")
row_start = jnp.full(1, row_start)
prow_start = row_start // m_l
prow_finish = (row_start + n_rows - 1) // m_l
two_prows = jnp.all(prow_start != prow_finish)
return jax.lax.cond(two_prows,
lambda x: _get_rows_two_prows(n_rows, x),
lambda x: _get_rows_single_prow(n_rows, x),
(matrix, row_start, rows))
def _get_rows_single_prow(n_rows, args):
""" Handles the simple case that the row panel lies within a prow.
We take the needed slice (whose size is known in advance)
and broadcast it.
"""
matrix, row_start, _ = args
m_l, n = matrix.shape
prow, brow = divmod(row_start, m_l)
panel = jax.lax.dynamic_slice(matrix, (brow[0], 0), (n_rows, n))
return broadcast_prow(panel, prow)
def _get_rows_two_prows(n_rows, args):
""" Handles the trickier case that the row panel straddles two prows.
The size of the local slices to extract is not known at compile time in
this case.
All of the elements are either in the "top", matrix[:n_rows, :],
or the "bottom", matrix[-n_rows:, :], which do have known sizes.
We extract these panels, mask all elements not in the desired extraction,
and then cyclically permute the elements upwards so that the extracted
rows are lined up with their location in the extraction. We then sum
the masked data, both locally and within pcols.
"""
matrix, row_start, rows = args
m_l, n = matrix.shape
top_offset = row_start % m_l # Local index of first row
top_blocks = matrix[:n_rows, :]
top_rows = rows[:n_rows, :]
good_top_rows = _in_range(top_rows, row_start, n_rows)
good_top_blocks = mask(top_blocks, good_top_rows)
good_top_blocks = jnp.roll(good_top_blocks, -top_offset, axis=0)
bottom_blocks = matrix[-n_rows:, :]
bottom_rows = rows[-n_rows:, :]
good_bottom_rows = _in_range(bottom_rows, row_start, n_rows)
good_bottom_rows = jnp.logical_and(
good_bottom_rows, bottom_rows != top_rows)
good_bottom_blocks = mask(bottom_blocks, good_bottom_rows)
good_bottom_blocks = jnp.roll(good_bottom_blocks, -top_offset, axis=0)
panel = good_top_blocks + good_bottom_blocks
return sum_over_pcols(panel)
def set_rows(matrix, panel, row_start, rows=None):
""" Inserts the (n_rows, n_l) panel to the locally
(m_l, n_l) matrix, logically as
matrix[row_start:row_start + n_rows, :]. It is assumed the panel data
is replicated over processor columns, so that n_rows is the mathematical
size of the panel.
Presently, we must have n_rows <= m_l.
Args:
matrix: ShardedDeviceArray to take from.
panel: Panel to insert.
row_start: Slice offset.
rows: Optionally previously-computed row indices of matrix.
Returns:
The matrix with the panel inserted.
"""
m_l, n_l = matrix.shape
n_rows, n_l_p = panel.shape
if n_l_p != n_l:
raise TypeError("Incompatible shapes {matrix.shape}, {panel.shape}.")
if n_rows > m_l:
raise TypeError(
f"Cannot insert more rows {n_rows} than local rows {m_l}.")
if rows is None:
rows, _ = indices(matrix.shape)
prow_start = row_start // m_l
prow_finish = (row_start + n_rows - 1) // m_l
two_prows = jnp.all(prow_start != prow_finish)
result = jax.lax.cond(two_prows,
_set_rows_two_prows,
_set_rows_single_prow,
(matrix, panel, row_start, rows))
return result
def _set_rows_single_prow(args):
matrix, panel, row_start, rows = args
n_rows = panel.shape[0]
panel_masked = mask(
matrix, jnp.logical_not(_in_range(rows, row_start, n_rows)))
brow = row_start % matrix.shape[0]
update = jax.lax.dynamic_update_slice(matrix, panel, (brow, 0))
update = mask(update, _in_range(rows, row_start, n_rows))
return panel_masked + update
def _set_rows_two_prows(args):
matrix, panel, row_start, rows = args
# First we pad the panel to the local size of matrix.
n_rows = panel.shape[0]
panel = jax.lax.dynamic_update_slice(
jnp.zeros_like(matrix), panel, (0, 0))
# We need to sort the rows of panel so that those to
# be inserted are correctly aligned with matrix.
row_idxs = jnp.arange(0, panel.shape[0]) + row_start
row_end = n_rows + row_start
# If this is the topmost prow containing an insertion,
# aligning the rows requires us to bring those not being inserted to the
# top of the block. Otherwise they must be brought to the bottom.
# We thus replace the row_idxs outside the insertion with -1 or
# max(row_idx) + 1 respectively. Sorting the rows then achieves this.
# True iff row_start is in this block of rows.
upper_prow = jnp.isin(jnp.full(1, row_start), rows)
upper_prow = jnp.full_like(row_idxs, upper_prow)
mask_value = jnp.where(upper_prow,
x=jnp.full_like(row_idxs, -1),
y=jnp.full_like(row_idxs, row_end + 1))
first_prow_idx = rows[0, 0]
last_prow_idx = rows[-1, 0]
to_be_inserted_here = jnp.logical_and(
row_idxs >= first_prow_idx, row_idxs <= last_prow_idx)
masked_row_idxs = jnp.where(to_be_inserted_here, x=row_idxs, y=mask_value)
sort_idxs = jnp.argsort(masked_row_idxs)
panel = panel[sort_idxs, :]
in_range = jnp.logical_and(rows >= row_start, rows < row_end)
return jnp.where(in_range, x=panel, y=matrix)
################################################################################
# DOT
################################################################################
def _paneled_dot(A, B, precision, panel_size_threshold):
"""Compute A @ B, breaking the computation into panels if necessary for memory
use.
By panels we mean computing C = A @ B as e.g. C[:ps0, :ps2] = A[:ps0, :] @
B[:, :ps2], etc. The breaking into panels is done if the temporary arrays that
the call to jnp.dot(A, B) would result in would cause roughly more than
`panel_size_threshold` gigabytes of memory allocations. Enough panels are used
such that the allocations would stay (roughly) below that threshold.
This should rarely if ever be necessary for float32 matmuls, but with ef57
data types jnp.dot allocates a lot of temporaries (roughly 12x the inputs + 8x
the output).
The paneling, while saving memory, obviously causes a time overhead, that
varies with matrix size.
Args:
A, B: The matrices to multiply.
precision: Jax matmul precision.
panel_size_threshold: Rough maximum number of gigabytes to allow for
temporary arrays.
Returns:
jnp.dot(A, B, precision=precision)
"""
if A.dtype != B.dtype:
msg = (f"Can't compute a paneled matmul of matrices with mixed dtypes "
f"({A.dtype} and {B.dtype}.")
raise TypeError(msg)
dim0, dim1 = A.shape
_, dim2 = B.shape
# Sizes in gigabytes
bytes_per_element = misc.byte_count(A.dtype)
giga = 2**30
A_size = dim0 * dim1 * bytes_per_element / giga
B_size = dim1 * dim2 * bytes_per_element / giga
C_size = dim0 * dim2 * bytes_per_element / giga
# Compute how many panels each matrix needs to be divided to, at the least.
is_ef57 = A.dtype in EF57_DTYPES
# The fact that these prefactors are large for ef57 is the reason for the
# existence of this function. See REDACTED
input_prefactor = 6 if is_ef57 else 1
output_prefactor = 4 if is_ef57 else 1
A_num_panels = int(np.ceil(input_prefactor * A_size / panel_size_threshold))
B_num_panels = int(np.ceil(input_prefactor * B_size / panel_size_threshold))
C_num_panels = int(np.ceil(output_prefactor * C_size / panel_size_threshold))
if A_num_panels == 1 and B_num_panels == 1 and C_num_panels == 1:
return jnp.dot(A, B, precision=precision)
# C will get A_num_panels * B_num_panels panels. If that's too small, increase
# the paneling of A and/or B as necessary. This will only trigger in cases
# where the summed over index is smaller than the free indices, a somewhat
# rare occasion.
while A_num_panels * B_num_panels < C_num_panels:
if A_num_panels == 1:
B_num_panels = C_num_panels
elif B_num_panels == 1:
A_num_panels = C_num_panels
else:
A_num_panels *= 2
B_num_panels *= 2
# One may wonder if we should make sure that the dimensions are divisible by
# the panel sizes. Turns out we don't, because of how dynamic_slice and
# dynamic_update_slice handle overruns. What happens in that case is that dim0
# % A_num_panels rows (dim2 % B_num_panels columns) are computed twice, a
# negligible cost since the panel numbers will always be much smaller than the
# dimension.
# ps for panel size. These are rounded up, to rather recompute a few
# rows/columns than leave a few uncomputed.
ps0 = int(np.ceil(dim0 / A_num_panels))
ps2 = int(np.ceil(dim2 / B_num_panels))
C = jnp.empty((dim0, dim2), dtype=A.dtype)
def body(i, args):
A, B, C = args
i0, i2 = divmod(i, B_num_panels)
start0 = i0 * ps0
start2 = i2 * ps2
A_panel = jax.lax.dynamic_slice(A, (start0, 0), (ps0, dim1))
B_panel = jax.lax.dynamic_slice(B, (0, start2), (dim1, ps2))
C_panel = jnp.dot(A_panel, B_panel, precision=precision)
C = jax.lax.dynamic_update_slice(C, C_panel, (start0, start2))
return A, B, C
_, _, C = jax.lax.fori_loop(0, A_num_panels * B_num_panels, body, (A, B, C))
return C
def dot(
A,
B,
precision=jax.lax.Precision.HIGHEST,
ef57_paneling=True,
panel_size_threshold=1,
):
"""Compute `jnp.dot(A, B, precision=precision)`, with bells and whistles.
The bells: The default precision is `HIGHEST` (for `jnp.dot` it's `DEFAULT`).
The whistles: If `ef57_paneling` is `True`, then large ef57 matrices are
broken into panels and the matmul is done in parts, to work around the large
memory overhead of the ef57 `jnp.dot`.
Args:
A, B: The matrices to multiply.
precision: Jax matmul precision. `jax.lax.Precision.HIGHEST` by default.
ef57_paneling: Boolean for whether to use paneling for large ef57 matrices.
`True` by default.
panel_size_threshold: The rough maximum amount of memory we should allow
the temporary arrays in `jnp.dot` to use, in gigabytes. Only relevant if
`ef57_paneling is True`. 1 by default. Changing this should rarely be
necessary.
Returns:
`jnp.dot(A, B, precision=precision)`
"""
if not ef57_paneling or (A.dtype not in EF57_DTYPES and
B.dtype not in EF57_DTYPES):
return jnp.dot(A, B, precision=precision)
else:
return _paneled_dot(A, B, precision, panel_size_threshold)
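# For example, dot(A, B) matches jnp.dot(A, B, precision=jax.lax.Precision.HIGHEST)
# for float32 inputs, while large ef57 (float64/complex128) inputs are routed
# through _paneled_dot so the temporaries stay under roughly
# panel_size_threshold gigabytes.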
################################################################################
# INITIALIZATION
################################################################################
def distribute_global(matrix: np.ndarray):
"""
Distribute a 2D array onto the globally available Jax devices.
In a single-host setting this is equivalent to `distribute(matrix)`. In a
multi-host setting, each host should have a copy of the same matrix, and this
matrix should have dimensions divisible by `config.GRID`. The matrix is then
distributed over the global device grid according to
`config.get_processor_grid`.
WARNING: No check is performed that all host-matrices have identical values.
Args:
matrix: A two-dimensional array to be distributed.
Returns:
ShardedDeviceArray: The distributed matrix.
Raises:
ValueError: If `matrix.shape` is not divisible by `config.GRID`.
"""
nrows, ncols = matrix.shape
if nrows % NROWS != 0:
raise ValueError(f"matrix.shape[0] = {nrows} not "
f"evenly divisible by NHROWS = {NHROWS}")
if ncols % NCOLS != 0:
raise ValueError(f"matrix.shape[1] = {ncols} not "
f"evenly divisible by NHCOLS = {NHCOLS}")
d0 = nrows // NROWS
d1 = ncols // NCOLS
local_devices = jax.local_devices()
host_id = jax.host_id()
panels = []
for i, dev in enumerate(local_devices):
axis_index = host_id * NDPROCS + i
pcol = your_pcol(axis_index)
prow = your_prow(axis_index)
row_slice = slice(prow * d0, (prow + 1) * d0, None)
col_slice = slice(pcol * d1, (pcol + 1) * d1, None)
panels.append(jax.device_put(matrix[row_slice, col_slice], dev))
return jax.device_put_sharded(panels, local_devices)
def distribute(matrix: np.ndarray,
pmap: bool = True) -> pxla.ShardedDeviceArray:
"""
Distribute a 2D array onto the local Jax devices.
In a multi-host setting, each host should hold one piece of a larger global
matrix, that is to be distributed over the devices of that host. In a
single-host setting this function is equivalent to `distribute_global`.
The matrix local to each host should have dimensions divisible by
`config.DGRID`.
Args:
matrix: A two-dimensional array to be distributed.
Returns:
ShardedDeviceArray: The distributed matrix.
Raises:
ValueError: If `matrix.shape` is not divisible by `config.DGRID`.
"""
if not np.all([s % p == 0 for s, p in zip(matrix.shape, DGRID)]):
raise ValueError(f"matrix.shape = {matrix.shape} not evenly divisible "
f"by DGRID = {DGRID}.")
ndim = matrix.ndim
if ndim != 2:
raise ValueError(f"matrix.ndim = {ndim} must be 2 in this version.")
pshape = np.asarray(DGRID)
shape = misc.flatten(
[p, s] for s, p in zip(np.array(matrix.shape) // pshape, pshape))
perm = list(range(2 * ndim - 2, -1, -2)) + list(range(1, 2 * ndim, 2))
reshaped = matrix.reshape(shape).transpose(perm)
final_shape = (np.prod(reshaped.shape[:ndim]), *reshaped.shape[ndim:])
A = reshaped.reshape(final_shape)
if not pmap:
return A
return jax.pmap(lambda x: x, devices=jax.local_devices())(A)
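# (e.g. with DGRID = (2, 2), a (4, 6) host matrix becomes a (4, 2, 3)
# ShardedDeviceArray: one contiguous (2, 3) block per local device)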
def distribute_sparse(A):
msg = "distribute_sparse has been renamed to distribute_sparse_global"
warnings.warn(msg, DeprecationWarning)
return distribute_sparse_global(A)
def distribute_sparse_global(A):
"""Distributes a sparse matrix as a dense ShardedDeviceArray, globally.
This is the equivalent of `distribute_global`, but for sparse matrices.
The function works by building each dense block on a host in turn, sending it
to a device, and discarding it from host memory. Thus it is capable of
distributing matrices that wouldn't fit in host memory as dense matrices, as
long as the individual device-blocks fit in host and device memory. In a
multi-host setup, the sparse matrix A should be the same on all hosts.
Args:
A: A scipy.sparse sparse matrix.
Raises:
TypeError: If A's dimensions are not divisible by `config.GRID`.
ValueError: If the number of local devices is different from the number of
grid points assigned to this host.
Returns:
Ap: The distributed ShardedDeviceArray.
"""
local_devices = jax.local_devices()
n_ldevices = len(local_devices)
if n_ldevices != NDPROCS:
msg = ("Number of local devices ({}) is different from number of local "
"grid points ({}).".format(n_ldevices, NDPROCS))
raise ValueError(msg)
n_rows, n_cols = A.shape
if n_rows % NROWS != 0:
msg = ("The first dimension of A ({}) isn't divisible by the number of "
"grid rows ({})".format(n_rows, NROWS))
raise ValueError(msg)
if n_cols % NCOLS != 0:
msg = ("The second dimension of A ({}) isn't divisible by the number of "
"grid columns ({})".format(n_cols, NCOLS))
raise ValueError(msg)
d0 = n_rows // NROWS
d1 = n_cols // NCOLS
host_id = jax.host_id()
A = A.tocsr()
shards = []
for i, dev in enumerate(local_devices):
name = host_id * NDPROCS + i
pcol = your_pcol(name)
prow = your_prow(name)
row_slice = slice(prow * d0, (prow + 1) * d0, None)
col_slice = slice(pcol * d1, (pcol + 1) * d1, None)
block = A[row_slice, :].tocsc()[:, col_slice]
shards.append(jax.device_put(block.todense(), dev))
Ap = jax.device_put_sharded(shards, local_devices)
return Ap
@functools.partial(pmap, out_axes=None)
def undistribute_global(matrix):
"""Collect a globally distributed matrix into a 2D array.
This is the reverse operation of `distribute_global`: It collects the globally
distributed matrix onto each ASIC as a single array. In a single-host setting
this is equivalent to `undistribute(matrix)`.
Args:
matrix: Array to be undistributed.
Returns:
DeviceArray: The undistributed 2D array.
"""
d0, d1 = matrix.shape
result = jnp.zeros((NROWS * d0, NCOLS * d1), dtype=matrix.dtype)
prow = my_prow()
pcol = my_pcol()
result = jax.lax.dynamic_update_slice(result, matrix, (prow * d0, pcol * d1))
result = jax.lax.psum(result, axis_name=AXIS_NAME)
return result
def undistribute(
matrix: pxla.ShardedDeviceArray,
host_local: bool = True,
collect_to_host: bool = False,
) -> np.ndarray:
"""Collect a distributed matrix into a 2D array.
This is the reverse operation of `distribute`. In a multi-host setting, each
host gets an array corresponding to the block of the global matrix that
resided on the devices local to that host. In contrast to
`undistribute_global`, this block isn't embedded in a global matrix padded
with zeros to the right size, but just returned by itself.
Args:
matrix: A distributed array to be undistributed into a local array.
host_local: If True, it is assumed that each host contains a different
matrix so that the resulting undistributed matrix will be different on
each host. If False, it is instead assumed that each host contains a
copy of the same matrix which was initially distributed across the full
grid (e.g. a multi-host-distributed matrix after an all-gather).
collect_to_host: By default, the return value is a `DeviceArray` on device
#0. If `collect_to_host is True`, then it is instead a Numpy array on the
host.
Returns:
numpy.ndarray: The undistributed 2D array.
"""
if host_local:
grid_shape = DGRID
else:
grid_shape = GRID
local_shape = matrix.shape[1:]
shape = tuple(grid_shape[::-1]) + local_shape
perm = misc.flatten([[len(grid_shape) - 1 - n, n + len(grid_shape)]
for n in range(len(grid_shape))])
final_shape = misc.global_shape(local_shape, grid_shape)
if collect_to_host:
matrix = np.array(matrix)
return matrix.reshape(shape).transpose(perm).reshape(final_shape)
@functools.partial(jax.jit, static_argnums=(0, 1))
def eye(local_shape, dtype, k=0, unpadded_dim=None):
"""
Returns a matrix with ones on its `k`'th diagonal and zeroes elsewhere,
with local shape `local_shape`, dtype `dtype`,
distributed across the processor grid.
Args:
local_shape: The shape of the matrix block on each core.
dtype: dtype of the matrix.
k: The diagonal to fill with ones.
unpadded_dim: If specified, only the top left `unpadded_dim x unpadded_dim`
block will be nonzero.
Returns:
The distributed rectangular identity.
"""
identity_matrix = jnp.zeros(local_shape, dtype=dtype)
identity_matrix = fill_diagonal(identity_matrix, 1, k=k)
if unpadded_dim is not None:
identity_matrix = apply_pad(identity_matrix, unpadded_dim)
return identity_matrix
################################################################################
# PROCESSOR ADDRESSING
################################################################################
def my_name():
"""
The pmap axis of this processor.
Returns:
i: The axis.
"""
return jax.lax.axis_index(axis_name=AXIS_NAME)
def your_pcol(p):
"""
Returns the pcol inhabited by processor p.
Args:
p: The processor number.
Returns:
pcol: The pcol of processor p.
"""
host_idx = p // NDPROCS
device_idx = p - NDPROCS * host_idx
host_col = host_idx // NHROWS
device_col = device_idx // NDROWS
return host_col * NDCOLS + device_col
def your_prow(p):
"""
Returns the prow inhabited by processor p.
Args:
p: The processor number.
Returns:
prow: The prow of processor p.
"""
host_idx = p // NDPROCS
device_idx = p - NDPROCS * host_idx
host_row = host_idx % NHROWS
device_row = device_idx % NDROWS
return host_row * NDROWS + device_row
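# (e.g. with the NHROWS = NHCOLS = NDROWS = NDCOLS = 2 layout sketched in the
# module docstring, your_prow(6) == 2 and your_pcol(6) == 1, matching
# processor 6's position in that grid)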
def my_pcol():
"""
Returns the pcol inhabited by this processor.
Returns:
pcol: The pcol of this processor.
"""
return your_pcol(my_name())
def my_prow():
"""
Returns the prow inhabited by this processor.
Returns:
prow: The prow of this processor.
"""
return your_prow(my_name())
def in_this_pcol(pcol):
"""
Returns a bool describing whether this processor inhabits `pcol`.
Args:
pcol: The pcol.
Returns:
The bool.
"""
return pcol == my_pcol()
def in_this_prow(prow):
"""
Returns a bool describing whether this processor inhabits `prow`.
Args:
prow: The prow.
Returns:
The bool.
"""
return prow == my_prow()
################################################################################
# AXIS_INDEX_GROUPS
################################################################################
def _totuple(a):
"""
Converts a numpy array into nested tuples, so that each row of the array
is a different tuple.
E.g.
a = [ [0 1 2 3]
[4 5 6 7] ]
out = ((0, 1, 2, 3), (4, 5, 6, 7))
"""
try:
return tuple(_totuple(i) for i in a)
except TypeError:
return a
def _axis_index_prows():
"""
Returns axis_index_groups such that the relevant operation is performed
over prows. Thus for grid=(4, 2), returns
( (0, 4), (1, 5), (2, 6), (3, 7) ); that is, one nested tuple for each row
of processors.
"""
return _totuple(config.get_processor_grid())
def _axis_index_pcols():
"""
Returns axis_index_groups such that the relevant operation is performed
over pcols. Thus for grid=(4, 2), returns
( (0, 1, 2, 3), (4, 5, 6, 7) ); that is, one nested tuple for each column
of processors.
"""
return _totuple(config.get_processor_grid().T)
def _axis_index_prow_pairs(start_from_zero=True):
""" Returns axis_index_groups such that the relevant operation is performed
over alternating prows. Thus for grid=(4, 2), returns
((0, 1), (2, 3), (4, 5), (6, 7)) (start_from_zero = True) or
((0, 3), (1, 2), (4, 7), (5, 6) (False)); that is, one nested tuple for each
pair of cores.
"""
ps = config.get_processor_grid().T
if not start_from_zero:
ps = np.roll(ps, 1, axis=1)
ps = ps.flatten()
n_pairs = ps.size // 2
tuples = _totuple(ps.reshape((n_pairs, 2)))
return tuple(tuple(sorted(t)) for t in tuples)
def _paxis_groups_to_linear(groups, rows=True):
""" `groups` is a nested sequence of prows to group together. Returns the
corresponding nested sequence of pmap axis indices, one per pcol per group.
E.g. on (4, 2) with rows = True:
groups = ((0, 1), (2, 3))
returns ((0, 1), (2, 3), (4, 5), (6, 7))
On (4, 2) with rows = False:
groups = ((1, 0))
returns ((4, 0), (5, 1), (6, 2), (7, 3))
"""
pgrid = config.get_processor_grid()
if not rows:
pgrid = pgrid.T
grouped = pgrid[groups, :]
n_groups, per_group, n_other = grouped.shape
reshaped = grouped.transpose((1, 0, 2)).reshape(
(per_group, n_groups * n_other))
return _totuple(reshaped.T)
################################################################################
# COMMUNICATION
################################################################################
def _psum_masked(masked, axis_name, axis_index_groups=None):
"""Computes `psum` for a mask matrix in a broadcast.
The result is the same as that of
`jax.lax.psum(masked, axis_name, axis_index_groups)`.
However, for ef57 matrices an extra optimisation is used to save memory and
communication time, using the assumption that `masked` is only non-zero on
one of the cores in each `axis_index_groups`. This often arises when using
`psum` for broadcasting.
"""
dtype = masked.dtype
if dtype in (jnp.float64, jnp.complex128):
lower_dtype = jnp.float32 if dtype == jnp.float64 else jnp.complex64
hi = masked.astype(lower_dtype)
lo = (masked - hi.astype(dtype)).astype(lower_dtype)
hi, lo = jax.lax.psum(
(hi, lo),
axis_name,
axis_index_groups=axis_index_groups,
)
return hi.astype(dtype) + lo.astype(dtype)
else:
return jax.lax.psum(masked, axis_name, axis_index_groups=axis_index_groups)
def broadcast_prow(A, prow):
"""
Returns the result of a broadcast of the portion of `A` in `prow` to all
other prows.
E.g. with grid=(2, 2), prow=1:
A = [ [a], [b] ]
[ [c], [d] ]
Out = [ [c], [d] ]
[ [c], [d] ]
Args:
A: The array.
prow: The prow to broadcast.
Returns:
The broadcasted array.
"""
groups = _axis_index_pcols()
masked_A = mask_except_prow(A, prow)
return _psum_masked(masked_A, AXIS_NAME, axis_index_groups=groups)
def _paxis_groups_error_checks(bcast_indices, groups, axis_size):
""" Error checks for _axis_index_prow_groups and _axis_index_pcol_groups.
"""
if not len(groups):
raise TypeError("Must specify at least one group.")
ngroups = len(bcast_indices)
if ngroups != len(groups):
raise TypeError(f"len(bcast_indices)={ngroups} must equal "
f"len(groups)={len(groups)}")
group_sizes = np.array([len(group) for group in groups])
group_size = group_sizes[0]
if ngroups > 1:
if not np.all(group_sizes[1:] == group_size):
raise ValueError("Groups must be of equal size.")
all_pidxs = np.hstack([g for g in groups])
if set(all_pidxs) != set(range(axis_size)):
raise ValueError(f"groups={groups} must contain each paxis exactly once.")
too_small = np.array(bcast_indices) < 0
too_big = np.array(bcast_indices) >= group_size
if np.any(np.logical_or(too_small, too_big)):
raise ValueError(f"Invalid group indices {bcast_indices}.")
def broadcast_paxis_to_groups(A, bcast_indices, groups, rows=True):
""" Broadcasts A's data in each prow (rows=True) or pcol (False)
groups[i][bcast_indices[i]] to the other prows/pcols in groups[i].
E.g. Rows True, grid=(4, 2), bcast_indices=(1, 0), groups=((0, 3), (1, 2))
A = [ [a], [b] ]
[ [c], [d] ]
[ [e], [f] ]
[ [g], [h] ]
Out = [ [g], [h] ] x prows (0, 3) grouped, groups[0][1] = 3 broadcast
[ [c], [d] ] < prows (1, 2) grouped, groups[1][0] = 1 broadcast
[ [c], [d] ] <
[ [g], [h] ] x
Rows False, grid=(4, 4), bcast_indices=(1, 0), groups=((0, 3), (1, 2))
A = [ [a], [b], [c], [d] ]
[ [e], [f], [g], [h] ]
[ [i], [j], [k], [l] ]
[ [m], [n], [o], [p] ]
Out = [ [d], [b], [b], [d] ]
[ [h], [f], [f], [h] ]
[ [l], [j], [j], [l] ]
[ [p], [n], [n], [p] ]
Args:
A: Matrix to broadcast.
bcast_indices: Concrete sequence of the same length as `groups`,
specifying which entry in the corresponding group to broadcast.
groups: Concrete nested sequence of equally-sized concrete integer
sequences, specifying which prows will be grouped together. Each
      integer in range(NROWS) (if rows=True) or range(NCOLS) (if rows=False)
      must appear exactly once.
"""
if rows:
axis_size = NROWS
my_pidx = my_prow()
else:
axis_size = NCOLS
my_pidx = my_pcol()
_paxis_groups_error_checks(bcast_indices, groups, axis_size)
# the linear indices of the groups
linear_groups = _paxis_groups_to_linear(groups, rows=rows)
to_broadcast = [g[idx] for g, idx in zip(groups, bcast_indices)]
do_not_mask = jnp.isin(my_pidx, np.array(to_broadcast))
masked_A = mask(A, do_not_mask)
return _psum_masked(masked_A, AXIS_NAME, axis_index_groups=linear_groups)
def broadcast_pcol(A, pcol):
"""
Returns the result of a broadcast of the portion of `A` in `pcol` to all
other pcols.
E.g. with grid=(2, 2), pcol=1:
A = [ [a], [b] ]
[ [c], [d] ]
Out = [ [b], [b] ]
[ [d], [d] ]
Args:
A: The array.
pcol: The pcol to broadcast.
Returns:
The broadcasted array.
"""
groups = _axis_index_prows()
masked_A = mask_except_pcol(A, pcol)
return _psum_masked(masked_A, AXIS_NAME, axis_index_groups=groups)
def gather_columns(A):
"""
Concatenates (vstacks) the checkerboard-distributed matrix A within each
pcol, so that each prow now stores a copy of the same data.
If A had local shape (m_l, n_l), the result has local shape
(grid[0] * m_l, n_l).
"""
groups = _axis_index_pcols()
A = jax.lax.all_gather(A, axis_index_groups=groups, axis_name=AXIS_NAME)
return A.reshape((A.shape[0] * A.shape[1], A.shape[2]))
def scatter_columns(A):
"""
  Performs the reverse operation of gather_columns. Thus, each prow receives
  the prow'th row-slice of A.
  If A had local shape (grid[0] * m_l, n_l), the result has local shape
  (m_l, n_l). If the number of local rows in A is not an integer multiple
  of grid[0], an error is raised.
"""
m_l, n_l = A.shape
if m_l % NROWS != 0:
raise TypeError(f"Rows of A: {A.shape} can't be scattered over"
f"{NROWS} prows.")
panel_size = m_l // NROWS
start = my_prow() * panel_size
return jax.lax.dynamic_slice(A, (start, jnp.zeros_like(start)),
(panel_size, n_l))
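def _demo_gather_scatter_round_trip(A):
  """Illustrative sketch only (hypothetical helper, not part of the library):
  when called inside the pmap over the device grid, scatter_columns undoes
  gather_columns, returning each core's original (m_l, n_l) block."""
  stacked = gather_columns(A)      # (NROWS * m_l, n_l), replicated in the pcol
  return scatter_columns(stacked)  # this prow's (m_l, n_l) slice, equal to A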
def gather_prow_pairs(A, start_from_zero=True):
""" Each prow vstacks its data with its immediate downstairs neighbour.
Depending on the value of `start_from_zero`, prows are paired either like
(e.g.) (0, 1), (2, 3) (start_from_zero=True), or (1, 2), (3, 0)
(start_from_zero=False).
"""
groups = _axis_index_prow_pairs(start_from_zero)
A = jax.lax.all_gather(A, axis_index_groups=groups, axis_name=AXIS_NAME)
return A.reshape((A.shape[0] * A.shape[1], A.shape[2]))
@functools.partial(jax.jit, static_argnums=(1, 2))
def roll_paxis(A, shift, rows):
""" Cyclically permutes the data in A by `shift` prows.
Args:
A: Matrix to permute.
shift: Number of prows to permute by.
rows: Rolls prows if True, pcols if False.
Returns:
The permuted matrix.
"""
if rows:
paxis = np.arange(NROWS)
else:
paxis = np.arange(NCOLS)
rolled = np.roll(paxis, -shift)
stacked = np.vstack((paxis, rolled)).T
tups = _totuple(stacked)
groups = _paxis_groups_to_linear(tups, rows=rows)
return jax.lax.ppermute(A, AXIS_NAME, groups)
def vstack_equal_shape(A, B):
""" Returns C = [A, B]^T, where A and B are checkerboard-distributed
matrices of the same shape. This should be pmapped.
Args:
A: An M x N checkerboard distributed matrix.
B: An M x N checkerboard distributed matrix.
Returns:
C: The 2M x N checkerboard distributed result.
"""
if NROWS % 2 != 0:
raise ValueError("vstack_equal_shape assumes an even number of prows.")
if A.shape != B.shape:
raise TypeError(f"A.shape = {A.shape} must equal B.shape = {B.shape}.")
A = gather_prow_pairs(A) # Now 2M x N
B = gather_prow_pairs(B) # Now 2M x N
prow_half = NROWS // 2 - 1
A = roll_paxis(A, -prow_half, True)
B = roll_paxis(B, prow_half, True)
C = jnp.where(my_prow() <= prow_half, x=A, y=B)
return C
def gather_rows(A):
"""
  Concatenates (hstacks) the checkerboard-distributed matrix A within each
  prow, so that each pcol now stores a copy of the same data.
If A had local shape (m_l, n_l), the result has local shape
(m_l, grid[1] * n_l).
"""
groups = _axis_index_prows()
A = jax.lax.all_gather(A, axis_index_groups=groups, axis_name=AXIS_NAME)
A = A.transpose((1, 0, 2))
return A.reshape((A.shape[0], A.shape[1] * A.shape[2]))
def scatter_rows(A):
"""
  Performs the reverse operation of gather_rows. Thus, each pcol receives
  the pcol'th column-slice of A.
  If A had local shape (m_l, grid[1] * n_l), the result has local shape
  (m_l, n_l). If the number of local columns in A is not an integer multiple
  of grid[1], an error is raised.
"""
m_l, n_l = A.shape
if n_l % NCOLS != 0:
raise TypeError(f"Columns of A: {A.shape} can't be scattered over"
f"{NCOLS} pcols.")
panel_size = n_l // NCOLS
start = my_pcol() * panel_size
return jax.lax.dynamic_slice(A, (jnp.zeros_like(start), start),
(m_l, panel_size))
def _asic_cluster_to_asic_node_axis_index_groups(grid):
"""Creates the axis_index_groups that group together all the device-blocks
that would be part of the same block if the matrix was distributed over a
single asic_node.
In the device `grid` these groups correspond to submatrices
`grid[:block_rows, :block_cols], grid[block_rows:2*block_rows, :block_cols]`,
etc.
"""
asic_node_rows, asic_node_cols = NDROWS, NDCOLS
block_rows = NROWS // asic_node_rows
block_cols = NCOLS // asic_node_cols
axis_index_groups = grid.reshape(
(asic_node_rows, block_rows,
asic_node_cols, block_cols)).transpose((0, 2, 1, 3)).reshape(
asic_node_rows * asic_node_cols, block_rows * block_cols)
return list(axis_index_groups)
def gather_to_asic_nodes(A):
"""
Gathers a globally distributed matrix to individual asic_nodes.
`gather_to_asic_nodes` takes a matrix that is distributed over the global device
grid, possibly over multiple hosts, and gathers its blocks so that each asic_node
attached to a single host gets a copy of the matrix, distributed in the same
manner as `distribute` would use if running on a single asic_node. Note that when
running on a single asic_node this is a no-op.
This function should be called within a pmap, on a pmapped argument. The cost
is that of one `all_gather` and one `pshuffle`.
Args:
A: The distributed matrix to gather.
Returns:
The gathered matrix.
"""
# NOTE(mhauru): So far this code has been tested on a v_3 and a v_2.
ndim = A.ndim
if ndim != 2:
raise ValueError(f"A.ndim should be 2 in gather_to_asic_nodes, but was {ndim}.")
local_rows, local_cols = A.shape
grid = config.get_processor_grid()
asic_node_rows, asic_node_cols = NDROWS, NDCOLS
# Let us call the blocks of the matrix that reside on individual devices when
# the distribution is over the whole global grid asic_cluster-blocks, and the blocks
# that reside on individual devices when the distribution is over a single
# asic_node asic_node-blocks. Each asic_cluster-block corresponds to one element in `grid`.
#
# The heart of this function is an all_gather with axis_index_groups and a
# pshuffle. The axis_index_groups are such that those asic_cluster-blocks which
# should form a single asic_node-block go in one group.
axis_index_groups = _asic_cluster_to_asic_node_axis_index_groups(grid)
A = jax.lax.all_gather(
A,
axis_name=AXIS_NAME,
axis_index_groups=axis_index_groups,
)
# Each core now has block_rows * block_cols asic_cluster-blocks, that we reshape to
# form a single asic_node-block.
block_rows = NROWS // asic_node_rows
block_cols = NCOLS // asic_node_cols
A = A.reshape((block_rows, block_cols, local_rows, local_cols)).transpose(
(0, 2, 1, 3)).reshape((block_rows * local_rows, block_cols * local_cols))
# Finally, we need to permute the distribution of the blocks, so that each
# asic_node has a full copy of the matrix distributed in the usual (4, 2) asic_node
# distribution pattern. For reasons hard to explain without getting graphical,
# this corresponds to a permutation where both the row and the column space of
# `grid` is partitioned into a tensor product of two indices, one for the
# asic_cluster-blocks internal to a asic_node-block, and one for the asic_node-blocks, and
# these two indices are swapped.
grid_perm = grid.reshape((asic_node_rows, block_rows, asic_node_cols,
block_cols)).transpose((1, 0, 3, 2)).reshape(
(NROWS, NCOLS))
# grid_perm gives the device ordering we should permute to on the grid. Now we
# just read the elements of grid_perm in the order given by grid, to find out
# the pshuffle permutation that implements this new ordering.
perm = grid_perm.ravel()[misc.inverse_permutation(grid.ravel())]
A = jax.lax.pshuffle(A, axis_name=AXIS_NAME, perm=perm)
return A
################################################################################
# PROCESSOR MASKS
################################################################################
def mask(A, cond):
"""
Returns an array, copied from A, with entries zeroed out where
`cond` is False. `cond` may be a single boolean value (in which case all
of A is either zeroed out or not) or an array of these with the same shape
as `A` (in which case the zeroing-out is done elementwise).
Args:
A: The array to be masked.
cond: False where the mask is to be applied.
Returns:
A: The masked array.
"""
  do_not_mask = jnp.zeros_like(A, dtype=bool) + cond  # np.bool was removed in modern numpy
return jnp.where(do_not_mask, x=A, y=jnp.zeros_like(A))
def mask_except_pcol(A, pcol):
"""
Return an array, copied from A but with all entries zeroed out save those
which inhabit `pcol`.
E.g. with grid=(2, 2), pcol=1:
A = [ [a], [b] ]
[ [c], [d] ]
Out = [ [0], [b] ]
[ [0], [d] ]
Args:
A: The array to be masked.
pcol: The pcol to preserve.
Returns:
The masked array.
"""
do_not_mask = in_this_pcol(pcol)
return mask(A, do_not_mask)
def mask_except_prow(A, prow):
"""
Return an array, copied from A but with all entries zeroed out save those
which inhabit `prow`.
E.g. with grid=(2, 2), prow=1:
A = [ [a], [b] ]
[ [c], [d] ]
  Out = [ [0], [0] ]
        [ [c], [d] ]
Args:
A: The array to be masked.
prow: The prow to preserve.
Returns:
The masked array.
"""
do_not_mask = in_this_prow(prow)
return mask(A, do_not_mask)
def mask_off_diagonal(matrix, k=0, unpadded_dim=None):
""" Returns a copy of `matrix` with all values 0 except those on the `k`'th
diagonal.
Args:
matrix: The matrix to mask.
k: The diagonal to leave intact.
unpadded_dim: If specified, only the top left `unpadded_dim x unpadded_dim`
block will be unmasked.
Returns:
A copy of `matrix` with all values 0 except those on the `k`th diagonal.
"""
leave_unmasked = on_kth_diagonal(matrix.shape, k=k, unpadded_dim=unpadded_dim)
return mask(matrix, leave_unmasked)
def apply_pad(matrix, unpadded_dim):
"""Zero-pads all entries of `matrix` outside its top-left unpadded_dim
by unpadded_dim block (of the full distributed matrix).
Args:
matrix: A checkerboard-distributed matrix.
unpadded_dim: Size of the block to leave unpadded.
Returns:
matrix: With the pad applied.
"""
leave_unmasked = within_unpadded_block(matrix.shape, unpadded_dim)
return mask(matrix, leave_unmasked)
################################################################################
# REDUCTIONS
################################################################################
def safe_psum(A, axis_name, **kwargs):
"""Calls `tree_psum` on efloat57 arrays, `jax.lax.psum` for other arrays."""
if A.dtype in (jnp.float64, jnp.complex128):
return tree_psum(A, axis_name, **kwargs)
else:
return jax.lax.psum(A, axis_name, **kwargs)
def tree_psum(A, axis_name, axis_index_groups=None):
"""Compute a `psum` as a tree-reduction, to save memory with ef57.
The return value is the same as that of
  `jax.lax.psum(A, axis_name, axis_index_groups)`.
This function is only needed as a workaround for psum for ef57 using a lot of
memory, see b/188162871.
tree_psum computes the psum as a tree of pair-wise reductions. It makes
log2(N) calls to jax.lax.psum, where N is the range of the sharded index, but
each call only sums over pairs of cores. It only works if all the groups in
`axis_index_groups` are of a length that is a power of two.
Args:
Same as for jax.lax.psum.
Raises:
Same as jax.lax.psum, plus
ValueError if the length of an `axis_index_group` is not a power of two.
Returns:
Same as for jax.lax.psum.
"""
if axis_index_groups is None:
l = jax.lax.psum(1, axis_name)
axis_index_groups = [list(range(l))]
l = len(axis_index_groups[0])
if not misc.is_power_of_two(l):
msg = (f"tree_psum got an axis_index_group of length {l}. "
"It can only handle group lengths that are powers of two.")
raise ValueError(msg)
for group in axis_index_groups:
if len(group) != l:
msg = ("All axis_index_groups must be of equal size, "
f"got two of sizes {l} and {len(group)}.")
raise ValueError(msg)
# The algorithm works like this: Break the cores in each group into pairs,
# psum over those pairs. Keep the first of each pair in the group, and move
# the second one to "trash", i.e. since it's already been absorbed into the
# first one we don't care about it anymore. Repeat the process on the groups
# that now have half as many cores in them, until each group only has one
# core. Those cores hold the result of the psum, that is finally broadcasted
# to the rest of the cores. The reason for keeping track of the "trash" cores
# is that every psum call must involve all cores. So even though we don't care
# about the values in the trash cores, we still need to psum them with
# something.
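  # Worked example (illustrative): axis_index_groups = [[0, 1, 2, 3]].
  #   Round 1: pairs = [(0, 1), (2, 3)] -> groups = [[0, 2]], trash = [1, 3]
  #   Round 2: pairs = [(1, 3), (0, 2)] -> groups = [[0]],    trash = [1, 3, 2]
  # Core 0 now holds the full sum, which the final masked psum broadcasts back
  # to the other cores.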
groups = list(map(list, axis_index_groups)) # Convert to lists, make a copy.
trash = []
while l > 1:
# It doesn't actually matter how we pair up the cores in trash, just pick
# some way.
# REDACTED We could try to optimise the pairing of the cores, to make
# communication distances as short as possible. I suspect they are usually
# close to optimal anyway, so the benefit might be small.
pairs = list(zip(trash[0::2], trash[1::2]))
for i, g in enumerate(groups):
evens = g[0::2]
odds = g[1::2]
pairs += list(zip(evens, odds))
groups[i] = evens
trash += odds
A = jax.lax.psum(A, axis_name, axis_index_groups=pairs)
l = l // 2
# By this point all groups are of length 1. gather_cores are the cores to
# which we have gathered the result of psumming each axis_index_group, and
# from which we will broadcast that result.
gather_cores = sum(groups, [])
core_id = my_name()
is_gather_core = sum(core_id == c for c in gather_cores)
A = mask(A, is_gather_core)
A = _psum_masked(A, axis_name, axis_index_groups=axis_index_groups)
return A
def sum_over_prows(A):
"""
Return an array whose elements are the sum of A's corresponding elements on
its prow.
E.g with grid=(2, 2):
A = [ [1, 2], [3, 4] ]
[ [5, 6], [7, 8] ]
Out = [ [4, 6], [4, 6] ]
[ [12, 14], [12, 14] ]
"""
groups = _axis_index_prows()
return safe_psum(A, AXIS_NAME, axis_index_groups=groups)
def sum_over_pcols(A):
"""
Return an array whose elements are the sum of A's corresponding elements on
its pcol.
E.g with grid=(2, 2):
A = [ [1, 2], [3, 4] ]
[ [5, 6], [7, 8] ]
Out = [ [6, 8], [10, 12] ]
[ [6, 8], [10, 12] ]
"""
groups = _axis_index_pcols()
return safe_psum(A, AXIS_NAME, axis_index_groups=groups)
def sum_prow_pairs(A):
"""
Return an array which differs from A in that each pair of prows has been
  summed. On an ASIC asic_cluster slice, this corresponds to summing the data within
each chip.
E.g with grid=(4, 2):
A = [ [1, 2], [3, 4] ]
[ [5, 6], [7, 8] ]
[ [9, 10], [11, 12] ]
[ [13, 14], [15, 16] ]
Out = [ [6, 8], [10, 12] ]
[ [6, 8], [10, 12] ]
        [ [22, 24], [26, 28] ]
        [ [22, 24], [26, 28] ]
"""
groups = _axis_index_prow_pairs()
return jax.lax.psum(A, axis_name=AXIS_NAME, axis_index_groups=groups)
def trace(A):
"""
Returns the trace of A.
Args:
A: The matrix.
Returns:
The trace (a ShardedDeviceArray with one element per core).
"""
only_diagonal = mask_off_diagonal(A)
local_trace = jnp.sum(only_diagonal)
return jax.lax.psum(local_trace, axis_name=AXIS_NAME)
def frobnorm(A):
"""
Computes the Frobenius norm of A.
Args:
A: The matrix.
Returns:
The norm (a ShardedDeviceArray with one element per core).
"""
squared = jnp.abs(A)**2
local_sum = jnp.sum(squared)
global_sum = jax.lax.psum(local_sum, axis_name=AXIS_NAME)
return jnp.sqrt(global_sum)
def gershgorin(H):
"""
  Computes estimates of the smallest and largest eigenvalues of a Hermitian
  `H` using the Gershgorin circle theorem. The estimates are guaranteed to
  bound the spectrum, but can be quite loose in many cases.
Args:
H: The Hermitian matrix whose spectrum is to be bounded.
Returns:
min_est: A lower bound on `H`'s smallest eigenvalue.
max_est: An upper bound on `H`'s largest eigenvalue.
"""
def _sum_cols(M):
M = jnp.sum(M, axis=0)
M = sum_over_pcols(M)
M = M.reshape((1, M.shape[0]))
M = gather_rows(M)
return jnp.ravel(M)
def _sum_rows(M):
M = jnp.sum(M, axis=1)
M = sum_over_prows(M)
M = M.reshape((M.shape[0], 1))
M = gather_columns(M)
return jnp.ravel(M)
H_diag = mask_off_diagonal(H)
H_diag = _sum_cols(H_diag)
abs_H_diag0 = jnp.abs(fill_diagonal(H, 0.))
col_sums = _sum_cols(abs_H_diag0)
row_sums = _sum_rows(abs_H_diag0)
row_min = jnp.min(H_diag - row_sums)
col_min = jnp.min(H_diag - col_sums)
min_est = jnp.max(jnp.array([row_min, col_min]))
row_max = jnp.max(H_diag + row_sums)
col_max = jnp.max(H_diag + col_sums)
max_est = jnp.min(jnp.array([row_max, col_max]))
return min_est, max_est
################################################################################
# INDEXING AND MAIN DIAGONAL
################################################################################
@functools.partial(jax.jit, static_argnums=(0,))
def indices_vec(local_shape: Tuple):
""" Given `local_shape = (rows per processor, cols per processor)`,
returns vectors `row_vec` and `col_vec` respectively indexing these rows
and cols within the full checkerboard-distributed matrix.
Args:
local_shape: Shape of the matrix block on each processor.
Returns:
    row_vec: Whose `i`th entry indexes the `i`th local row within the full
      distributed matrix.
    col_vec: Whose `j`th entry indexes the `j`th local column within the full
      distributed matrix. E.g. (illustrative) with local_shape=(2, 2) on the
      processor at prow 1, pcol 0: row_vec = [2, 3], col_vec = [0, 1].
"""
m, n = local_shape
i = my_prow()
j = my_pcol()
rows_vector = jnp.arange(m) + i * m
cols_vector = jnp.arange(n) + j * n
return rows_vector, cols_vector
@functools.partial(jax.jit, static_argnums=(0,))
def indices(local_shape: Tuple):
""" Returns arrays of shape local_shape storing the respctive column
and row indices of each local matrix element within the mathematical
matrix.
Args:
local_shape: Shape of the matrix block on each processor.
Returns:
rows, cols: The mathematical row and column indices of each local matrix
element.
"""
rows_vector, cols_vector = indices_vec(local_shape)
cols, rows = jnp.meshgrid(cols_vector, rows_vector)
return rows, cols
def within_unpadded_block(local_shape, unpadded_dim):
""" Returns a boolean matrix of local shape `local_shape` whose entries
are `True` within the top-left `unpadded_dim x unpadded_dim` block of the
corresponding checkerboard-distributed matrix and `False` elsewhere.
Args:
local_shape: Shape of the matrix block on each processor.
unpadded_dim: Size of the top-left block. May be None, in which case a
matrix of True is returned.
Returns:
A matrix of `True` within the top-left `unpadded_dim x unpadded_dim` block
and `False` elsewhere.
"""
if unpadded_dim is None:
    return jnp.ones(local_shape, dtype=bool)  # np.bool was removed in modern numpy
rows_vector, cols_vector = indices_vec(local_shape)
left_panel = rows_vector < unpadded_dim
top_panel = cols_vector < unpadded_dim
return jnp.logical_and(left_panel[:, None], top_panel)
def on_kth_diagonal(local_shape: Tuple, k=0, unpadded_dim=None):
""" Returns a boolean matrix of local shape `local_shape` whose entries
are `True` upon the `k`th diagonal of the corresponding
checkerboard-distributed matrix and `False` elsewhere.
Args:
local_shape: Shape of the matrix block on each processor.
k: The diagonal to be selected. k=0 is the main diagonal.
unpadded_dim: If specified, only entries in the top-left
`unpadded_dim x unpadded_dim` block of the global matrix will be
potentially `True`.
Returns:
on_kth_diagonal: Of the given local shape; `True` on the `k`th diagonal
of the global matrix and `False` elsewhere.
"""
rows_vector, cols_vector = indices_vec(local_shape)
cols_vector = cols_vector - k
result = rows_vector[:, None] == cols_vector
if unpadded_dim is not None:
unmasked = within_unpadded_block(local_shape, unpadded_dim)
result = jnp.logical_and(result, unmasked)
return result
def fill_diagonal(matrix, value, k=0, unpadded_dim=None):
"""
Returns a matrix identical to `matrix` except that the `k'th` diagonal has
been overwritten with the value `value`.
Args:
matrix: Matrix whose diagonal to fill.
value: The value to fill the diagonal with.
k: The diagonal to fill.
unpadded_dim: If specified, only the `unpadded_dim x unpadded_dim` top left
block will be filled.
Returns:
A copy of `matrix`, with the `k'th` diagonal replaced by `value`.
"""
replace_here = on_kth_diagonal(matrix.shape, k=k, unpadded_dim=unpadded_dim)
replace_with = jnp.full(replace_here.shape[1], value)
return jnp.where(replace_here, x=replace_with, y=matrix)
def add_to_diagonal(matrix, value, k=0, unpadded_dim=None):
"""
Returns a matrix identical to `matrix` except that the `k'th` diagonal has
been summed with the value `value`; for `k=0` this performs
`matrix = matrix + value * eye`.
Args:
matrix: Matrix whose diagonal to add to.
value: The value to add to the diagonal.
k: The diagonal to increment.
unpadded_dim: If specified, only the `unpadded_dim x unpadded_dim` top left
block will be incremented.
Returns:
A copy of `matrix`, with the `k'th` diagonal incremented by `value`.
"""
add_here = on_kth_diagonal(matrix.shape, k=k, unpadded_dim=unpadded_dim)
return jnp.where(add_here, x=matrix + value, y=matrix)
################################################################################
# MATRIX OPERATIONS
################################################################################
def _transpose_preprocess():
"""Do matrix-independent preprocessing in preparation of transposing matrices.
Many things about how distributed matrices are transposed depend only on the
processor grid, and not on the matrix. We compute those things here, so that
they can be computed once at load-time, rather than every time `transpose` is
JIT compiled.
Transposes are only supported for processor grids with proportions 2:1 or 1:2,
which informs the design of this function.
Args:
N/A
Returns:
    horizontal_devices: Whether device blocks are wider than tall, or vice
      versa.
    perm0: Permutation for the first pshuffle.
    perm1: Permutation for the second pshuffle.
    pre_reversed_devices: Which device numbers need to reverse their blocks
      before the pshuffles.
    post_reversed_devices: Which device numbers need to reverse their blocks
      after the pshuffles.
"""
# The square matrix is split into equal sized square blocks. Each device holds
# two of these blocks, if horizontal_devices is True then as
# [[block0, block1]], otherwise as
# [[block0],
# [block1]].
if NROWS == NCOLS * 2:
horizontal_devices = True
elif NCOLS == NROWS * 2:
horizontal_devices = False
else:
msg = ("WARNING: transpose is only supported for device grids with "
"proportions 2:1 or 1:2.")
    # TODO: use logging.warning
print(msg)
return None
# There are max_grid_dim * max_grid_dim blocks in total. Each block is given a
# numerical label, and these labels are organised in a block_grid. The name
# _pre refers to the situation before the transpose, _post to after.
max_grid_dim = max(NCOLS, NROWS)
  block_grid_pre = np.arange(max_grid_dim**2)
#!/usr/bin/env python
import numpy as np
import cv2 as cv
import math
def find_greater_contour(contours):
    """
    Find the contour with the largest area.
    :param contours: sequence of contours, e.g. as returned by cv.findContours
    :return: (largest_area, largest_contour_index); the index is -1 if
        `contours` is empty
    """
    largest_area = 0
    largest_contour_index = -1
    for i, contour in enumerate(contours):
        area = cv.contourArea(contour)
        if area > largest_area:
            largest_area = area
            largest_contour_index = i
    return largest_area, largest_contour_index
def find_angle(box):
    """
    Calculate the angle between the OX axis and the longer side of the box.
    :param box: array of four corner points, e.g. from cv.boxPoints
    :return: int, angle in degrees
    """
    edge1 = np.int0((box[1][0] - box[0][0], box[1][1] - box[0][1]))
    edge2 = np.int0((box[2][0] - box[1][0], box[2][1] - box[1][1]))
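    # NOTE: the source is truncated here; what follows is a hedged sketch of
    # the usual continuation (assumed, not the original code): take the longer
    # edge and measure its angle against the OX axis.
    used_edge = edge1 if np.linalg.norm(edge1) > np.linalg.norm(edge2) else edge2
    angle = math.degrees(math.atan2(used_edge[1], used_edge[0]))
    return int(angle)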
#!/usr/bin/env python
""" Classification of multiple bundles from multiple examples.
"""
from __future__ import print_function, division
import os
import sys
import argparse
import numpy as np
import time
import ntpath
import nibabel as nib
import pickle
from utils import compute_kdtree_and_dr_tractogram, compute_superset, streamlines_idx, save_trk
from dipy.tracking.distances import bundles_distances_mam, bundles_distances_mdf
from dipy.tracking.streamline import set_number_of_points
from collections import OrderedDict
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from features_mni import compute_feature_matrix
# Global configuration parameters
num_prototypes = 100  # prototype streamlines for the dissimilarity representation
distance_func = bundles_distances_mdf  # streamline-to-streamline distance (MDF)
nb_points = 20  # points per resampled streamline
cw = {0: 1, 1: 3}  # class weights (non-bundle vs bundle), presumably for the logistic regression
max_iter = 1000  # maximum number of solver iterations
def compute_X_y_train(tract_name, moving_tractogram_fname, example_fname):
"""Compute X_train and y_train.
"""
moving_tractogram = nib.streamlines.load(moving_tractogram_fname)
moving_tractogram = moving_tractogram.streamlines
print("Compute kdt and prototypes of %s" %moving_tractogram_fname)
kdt, prototypes = compute_kdtree_and_dr_tractogram(moving_tractogram, num_prototypes=num_prototypes,
distance_func=distance_func, nb_points=nb_points)
tract = nib.streamlines.load(example_fname)
tract = tract.streamlines
print("Computing the superset of %s" %example_fname)
superset_idx = compute_superset(tract, kdt, prototypes, k=2000, distance_func=distance_func, nb_points=nb_points)
superset = moving_tractogram[superset_idx]
exID = ntpath.basename(moving_tractogram_fname)[4:10]
print("Computing X_train.")
X_train = compute_feature_matrix(superset, tract_name, distance_func=distance_func, nb_points=nb_points)
print("Computing y_train.")
y_train = np.zeros(len(superset))
tract_idx = streamlines_idx(tract, kdt, prototypes, distance_func=distance_func, nb_points=nb_points)
correspondent_idx = np.array([np.where(superset_idx==idx) for idx in tract_idx])
y_train[correspondent_idx] = 1
return X_train, y_train
def compute_union_superset_idx(kdt, prototypes, ex_dir_tract, distance_func=bundles_distances_mam, nb_points=20):
"""Compute a superset in a subject starting from the tracts of other subjects.
"""
union_superset_idx = []
examples = os.listdir(ex_dir_tract)
examples.sort()
ne = len(examples)
th = np.min([ne, 10]) #maximum 10 subjects
for i in range(th):
example_fname = '%s/%s' %(ex_dir_tract, examples[i])
tract = nib.streamlines.load(example_fname)
tract = tract.streamlines
superset_idx_test = compute_superset(tract, kdt, prototypes, k=2000, distance_func=distance_func, nb_points=nb_points)
union_superset_idx = np.concatenate((union_superset_idx, superset_idx_test))
print("Total size superset: %s" %len(union_superset_idx))
union_superset_idx = list(OrderedDict.fromkeys(union_superset_idx)) #removes duplicates
union_superset_idx = np.array(union_superset_idx, dtype=int)
print("Size reducted superset: %s" %len(union_superset_idx))
return union_superset_idx
def classifyber(moving_tractograms_dir, static_tractogram_fname, ex_dir_tract):
"""Code for classification from multiple examples.
"""
tract_name = ntpath.basename(ex_dir_tract)
moving_tractograms = os.listdir(moving_tractograms_dir)
moving_tractograms.sort()
examples = os.listdir(ex_dir_tract)
examples.sort()
nt = len(moving_tractograms)
ne = len(examples)
assert(nt == ne)
X_train = np.array([])
    y_train = np.array([])
# Copyright (c) 2020 <NAME>, <NAME>, <NAME> and
# <NAME>
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import numpy as np
import time
import unittest
from bark.runtime.commons.parameters import ParameterServer
from bark.runtime.viewer.matplotlib_viewer import MPViewer
from bark.runtime.commons.xodr_parser import XodrParser
from bark.core.models.behavior import BehaviorConstantVelocity, \
BehaviorMPContinuousActions
from bark.core.models.execution import ExecutionModelInterpolate
from bark.core.models.dynamic import SingleTrackModel, StateDefinition
from bark.core.world import World
from bark.core.world.goal_definition import GoalDefinitionPolygon, \
GoalDefinitionStateLimits, GoalDefinitionSequential, \
GoalDefinitionStateLimitsFrenet
from bark.core.world.agent import Agent
from bark.core.world.map import MapInterface
from bark.core.geometry.standard_shapes import CarLimousine
from bark.core.geometry import Point2d, Polygon2d, Line2d
from bark.core.world.evaluation import EvaluatorGoalReached, \
EvaluatorCollisionEgoAgent, EvaluatorStepCount
class EvaluationTests(unittest.TestCase):
def test_one_agent_at_goal_polygon(self):
param_server = ParameterServer()
# Model Definition
behavior_model = BehaviorConstantVelocity(param_server)
execution_model = ExecutionModelInterpolate(param_server)
dynamic_model = SingleTrackModel(param_server)
# Agent Definition
agent_2d_shape = CarLimousine()
init_state = np.array([0, -191.789,-50.1725, 3.14*3.0/4.0, 150/3.6])
agent_params = param_server.AddChild("agent1")
goal_polygon = Polygon2d([0, 0, 0],
[Point2d(-4,-4),
Point2d(-4,4),
Point2d(4,4),
Point2d(4,-4)])
goal_polygon = goal_polygon.Translate(Point2d(-191.789,-50.1725))
agent = Agent(init_state,
behavior_model,
dynamic_model,
execution_model,
agent_2d_shape,
agent_params,
GoalDefinitionPolygon(goal_polygon),
None)
world = World(param_server)
world.AddAgent(agent)
evaluator = EvaluatorGoalReached(agent.id)
world.AddEvaluator("success", evaluator)
info = world.Evaluate()
self.assertEqual(info["success"], True)
def test_one_agent_at_goal_state_limits(self):
param_server = ParameterServer()
# Model Definition
behavior_model = BehaviorConstantVelocity(param_server)
execution_model = ExecutionModelInterpolate(param_server)
dynamic_model = SingleTrackModel(param_server)
# Agent Definition
agent_2d_shape = CarLimousine()
        init_state = np.array([0, -191.789, -50.1725, 3.14*3.0/4.0, 150/3.6])
import copy
import numpy as np
import random
from sklearn.utils import shuffle
from ml_utils import ActivationFunctions, LossFunctions
import time
from serializer import Serializer
class NamesToNationalityClassifier:
def __init__(self, possible_labels, alpha=0.0001, hidden_dimensions=500, l2_lambda = 0.02, momentum=0.9, num_epoche=30):
self.serializer = Serializer(possible_labels)
self.alpha = alpha
self.input_dimensions = self.serializer.input_dimensions
self.hidden_dimensions = hidden_dimensions
self.output_dimensions = self.serializer.target_dimensions
self.training_to_validation_ratio = 0.7 # This means 70% of the dataset will be used for training, and 30% is for validation
# Weight Initialization
# We are using the Xavier initialization
# Reference: https://medium.com/usf-msds/deep-learning-best-practices-1-weight-initialization-14e5c0295b94
self.weight_init_type = 'X1'
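        # In this scheme each weight is drawn as randn() * sqrt(1 / fan_in),
        # i.e. W_ij ~ N(0, 1 / fan_in), which keeps the variance of the
        # pre-activations roughly constant from layer to layer.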
self.W0 = np.random.randn(self.hidden_dimensions, self.hidden_dimensions) * np.sqrt(1 / self.hidden_dimensions)
self.W1 = np.random.randn(self.hidden_dimensions, self.input_dimensions + 1) * np.sqrt(1 / (self.input_dimensions + 1))
        self.W2 = np.random.randn(self.output_dimensions, self.hidden_dimensions + 1) * np.sqrt(1 / (self.hidden_dimensions + 1))
import unittest
import numpy as np
from dgplib.layers import find_weights, InputLayer, HiddenLayer, OutputLayer
from gpflow.decors import defer_build
from gpflow.kernels import White, RBF
from gpflow.mean_functions import Linear
class LayerTest(unittest.TestCase):
def setUp(self):
pass
def test_build_predict(self):
#Run on small toy dataset and expect answer to be similar to SVGP
pass
class WeightsTest(unittest.TestCase):
def setUp(self):
        self.X = np.array([[2,4], [1,3], [0,0], [0,0]])
"""Plotting and video making functions for ARHMMs."""
import pickle
import os
import numpy as np
import torch
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.animation as animation
from behavenet import make_dir_if_not_exists
from behavenet.models import AE as AE
from behavenet.plotting import save_movie
# to ignore imports for sphix-autoapidoc
__all__ = [
'get_discrete_chunks', 'get_state_durations', 'get_latent_arrays_by_dtype',
'get_model_latents_states',
'make_syllable_movies_wrapper', 'make_syllable_movies',
'real_vs_sampled_wrapper', 'make_real_vs_sampled_movies', 'plot_real_vs_sampled',
'plot_states_overlaid_with_latents', 'plot_state_transition_matrix', 'plot_dynamics_matrices',
'plot_obs_biases', 'plot_obs_covariance_matrices']
def get_discrete_chunks(states, include_edges=True):
"""Find occurences of each discrete state.
Parameters
----------
states : :obj:`list`
list of trials; each trial is numpy array containing discrete state for each frame
include_edges : :obj:`bool`
include states at start and end of chunk
Returns
-------
:obj:`list`
        list with one entry per discrete state; each entry contains all occurrences of that state by
:obj:`[chunk number, starting index, ending index]`
"""
max_state = max([max(x) for x in states])
indexing_list = [[] for _ in range(max_state + 1)]
for i_chunk, chunk in enumerate(states):
# pad either side so we get start and end chunks
chunk = np.pad(chunk, (1, 1), mode='constant', constant_values=-1)
# don't add 1 because of start padding, this is now index in original unpadded data
split_indices = np.where(np.ediff1d(chunk) != 0)[0]
# last index will be 1 higher that it should be due to padding
# split_indices[-1] -= 1
for i in range(len(split_indices)-1):
# get which state this chunk was (+1 because data is still padded)
which_state = chunk[split_indices[i]+1]
if not include_edges:
if split_indices[i] != 0 and split_indices[i+1] != (len(chunk)-2):
indexing_list[which_state].append(
[i_chunk, split_indices[i], split_indices[i+1]])
else:
indexing_list[which_state].append(
[i_chunk, split_indices[i], split_indices[i+1]])
# convert lists to numpy arrays
indexing_list = [np.asarray(indexing_list[i_state]) for i_state in range(max_state + 1)]
return indexing_list
def get_state_durations(latents, hmm, include_edges=True):
"""Calculate frame count for each state.
Parameters
----------
latents : :obj:`list` of :obj:`np.ndarray`
latent states
hmm : :obj:`ssm.HMM`
        arhmm object
include_edges : :obj:`bool`
include states at start and end of chunk
Returns
-------
:obj:`list`
number of frames for each state run; list is empty if single-state model
"""
if hmm.K == 1:
return []
states = [hmm.most_likely_states(x) for x in latents if len(x) > 0]
state_indices = get_discrete_chunks(states, include_edges=include_edges)
durations = []
for i_state in range(0, len(state_indices)):
if len(state_indices[i_state]) > 0:
durations.append(np.concatenate(np.diff(state_indices[i_state][:, 1:3], 1)))
else:
durations.append(np.array([]))
return durations
def get_latent_arrays_by_dtype(data_generator, sess_idxs=0, data_key='ae_latents'):
"""Collect data from data generator and put into dictionary with dtypes for keys.
Parameters
----------
data_generator : :obj:`ConcatSessionsGenerator`
sess_idxs : :obj:`int` or :obj:`list`
concatenate train/test/val data across one or more sessions
data_key : :obj:`str`
key into data generator object; 'ae_latents' | 'labels'
Returns
-------
:obj:`tuple`
- latents (:obj:`dict`): with keys 'train', 'val', 'test'
- trial indices (:obj:`dict`): with keys 'train', 'val', 'test'
"""
if isinstance(sess_idxs, int):
sess_idxs = [sess_idxs]
dtypes = ['train', 'val', 'test']
latents = {key: [] for key in dtypes}
trial_idxs = {key: [] for key in dtypes}
for sess_idx in sess_idxs:
dataset = data_generator.datasets[sess_idx]
for data_type in dtypes:
curr_idxs = dataset.batch_idxs[data_type]
trial_idxs[data_type] += list(curr_idxs)
latents[data_type] += [dataset[i_trial][data_key][0][:] for i_trial in curr_idxs]
return latents, trial_idxs
def get_model_latents_states(
hparams, version, sess_idx=0, return_samples=0, cond_sampling=False, dtype='test',
dtypes=['train', 'val', 'test'], rng_seed=0):
"""Return arhmm defined in :obj:`hparams` with associated latents and states.
Can also return sampled latents and states.
Parameters
----------
hparams : :obj:`dict`
needs to contain enough information to specify an arhmm
version : :obj:`str` or :obj:`int`
test tube model version (can be 'best')
sess_idx : :obj:`int`, optional
session index into data generator
return_samples : :obj:`int`, optional
number of trials to sample from model
cond_sampling : :obj:`bool`, optional
if :obj:`True` return samples conditioned on most likely state sequence; else return
unconditioned samples
dtype : :obj:`str`, optional
        trial type to use for conditional sampling; 'train' | 'val' | 'test'
dtypes : :obj:`array-like`, optional
trial types for which to collect latents and states
rng_seed : :obj:`int`, optional
random number generator seed to control sampling
Returns
-------
:obj:`dict`
- 'model' (:obj:`ssm.HMM` object)
- 'latents' (:obj:`dict`): latents from train, val and test trials
- 'states' (:obj:`dict`): states from train, val and test trials
- 'trial_idxs' (:obj:`dict`): trial indices from train, val and test trials
- 'latents_gen' (:obj:`list`)
- 'states_gen' (:obj:`list`)
"""
from behavenet.data.utils import get_transforms_paths
from behavenet.fitting.utils import experiment_exists
from behavenet.fitting.utils import get_best_model_version
from behavenet.fitting.utils import get_expt_dir
from behavenet.fitting.utils import get_session_dir
hparams['session_dir'], sess_ids = get_session_dir(
hparams, session_source=hparams.get('all_source', 'save'))
hparams['expt_dir'] = get_expt_dir(hparams)
# get version/model
if version == 'best':
version = get_best_model_version(
hparams['expt_dir'], measure='val_loss', best_def='min')[0]
else:
_, version = experiment_exists(hparams, which_version=True)
if version is None:
raise FileNotFoundError(
'Could not find the specified model version in %s' % hparams['expt_dir'])
# load model
model_file = os.path.join(hparams['expt_dir'], 'version_%i' % version, 'best_val_model.pt')
with open(model_file, 'rb') as f:
hmm = pickle.load(f)
# load latents/labels
if hparams['model_class'].find('labels') > -1:
from behavenet.data.utils import load_labels_like_latents
all_latents = load_labels_like_latents(hparams, sess_ids, sess_idx)
else:
_, latents_file = get_transforms_paths('ae_latents', hparams, sess_ids[sess_idx])
with open(latents_file, 'rb') as f:
all_latents = pickle.load(f)
# collect inferred latents/states
trial_idxs = {}
latents = {}
states = {}
for data_type in dtypes:
trial_idxs[data_type] = all_latents['trials'][data_type]
latents[data_type] = [all_latents['latents'][i_trial] for i_trial in trial_idxs[data_type]]
states[data_type] = [hmm.most_likely_states(x) for x in latents[data_type]]
# collect sampled latents/states
if return_samples > 0:
states_gen = []
np.random.seed(rng_seed)
if cond_sampling:
n_latents = latents[dtype][0].shape[1]
latents_gen = [np.zeros((len(state_seg), n_latents)) for state_seg in states[dtype]]
for i_seg, state_seg in enumerate(states[dtype]):
for i_t in range(len(state_seg)):
if i_t >= 1:
latents_gen[i_seg][i_t] = hmm.observations.sample_x(
states[dtype][i_seg][i_t], latents_gen[i_seg][:i_t], input=np.zeros(0))
else:
latents_gen[i_seg][i_t] = hmm.observations.sample_x(
states[dtype][i_seg][i_t],
latents[dtype][i_seg][0].reshape((1, n_latents)), input=np.zeros(0))
else:
latents_gen = []
offset = 200
for i in range(return_samples):
these_states_gen, these_latents_gen = hmm.sample(
latents[dtype][0].shape[0] + offset)
latents_gen.append(these_latents_gen[offset:])
states_gen.append(these_states_gen[offset:])
else:
latents_gen = []
states_gen = []
return_dict = {
'model': hmm,
'latents': latents,
'states': states,
'trial_idxs': trial_idxs,
'latents_gen': latents_gen,
'states_gen': states_gen,
}
return return_dict
def make_syllable_movies_wrapper(
hparams, save_file, sess_idx=0, dtype='test', max_frames=400, frame_rate=10,
min_threshold=0, n_buffer=5, n_pre_frames=3, n_rows=None, single_syllable=None):
"""Present video clips of each individual syllable in separate panels.
This is a high-level function that loads the arhmm model described in the hparams dictionary
and produces the necessary states/video frames.
Parameters
----------
hparams : :obj:`dict`
needs to contain enough information to specify an arhmm
save_file : :obj:`str`
full save file (path and filename)
sess_idx : :obj:`int`, optional
session index into data generator
dtype : :obj:`str`, optional
types of trials to make video with; 'train' | 'val' | 'test'
max_frames : :obj:`int`, optional
maximum number of frames to animate
frame_rate : :obj:`float`, optional
frame rate of saved movie
min_threshold : :obj:`int`, optional
minimum number of frames in a syllable run to be considered for movie
n_buffer : :obj:`int`
number of blank frames between syllable instances
n_pre_frames : :obj:`int`
number of behavioral frames to precede each syllable instance
n_rows : :obj:`int` or :obj:`NoneType`
number of rows in output movie
single_syllable : :obj:`int` or :obj:`NoneType`
choose only a single state for movie
"""
from behavenet.data.data_generator import ConcatSessionsGenerator
from behavenet.data.utils import get_data_generator_inputs
from behavenet.data.utils import get_transforms_paths
from behavenet.fitting.utils import experiment_exists
from behavenet.fitting.utils import get_expt_dir
from behavenet.fitting.utils import get_session_dir
# load images, latents, and states
hparams['session_dir'], sess_ids = get_session_dir(
hparams, session_source=hparams.get('all_source', 'save'))
hparams['expt_dir'] = get_expt_dir(hparams)
hparams['load_videos'] = True
hparams, signals, transforms, paths = get_data_generator_inputs(hparams, sess_ids)
data_generator = ConcatSessionsGenerator(
hparams['data_dir'], sess_ids,
signals_list=[signals[sess_idx]],
transforms_list=[transforms[sess_idx]],
paths_list=[paths[sess_idx]],
device='cpu', as_numpy=True, batch_load=False, rng_seed=hparams['rng_seed_data'])
ims_orig = data_generator.datasets[sess_idx].data['images']
del data_generator # free up memory
# get tt version number
_, version = experiment_exists(hparams, which_version=True)
print('producing syllable videos for arhmm %s' % version)
# load latents/labels
if hparams['model_class'].find('labels') > -1:
from behavenet.data.utils import load_labels_like_latents
latents = load_labels_like_latents(hparams, sess_ids, sess_idx)
else:
_, latents_file = get_transforms_paths('ae_latents', hparams, sess_ids[sess_idx])
with open(latents_file, 'rb') as f:
latents = pickle.load(f)
trial_idxs = latents['trials'][dtype]
# load model
model_file = os.path.join(hparams['expt_dir'], 'version_%i' % version, 'best_val_model.pt')
with open(model_file, 'rb') as f:
hmm = pickle.load(f)
# infer discrete states
states = [hmm.most_likely_states(latents['latents'][s]) for s in latents['trials'][dtype]]
if len(states) == 0:
raise ValueError('No latents for dtype=%s' % dtype)
# find runs of discrete states; state indices is a list, each entry of which is a np array with
# shape (n_state_instances, 3), where the 3 values are:
# chunk_idx, chunk_start_idx, chunk_end_idx
# chunk_idx is in [0, n_chunks], and indexes trial_idxs
state_indices = get_discrete_chunks(states, include_edges=True)
K = len(state_indices)
# get all example over minimum state length threshold
over_threshold_instances = [[] for _ in range(K)]
for i_state in range(K):
if state_indices[i_state].shape[0] > 0:
state_lens = np.diff(state_indices[i_state][:, 1:3], axis=1)
over_idxs = state_lens > min_threshold
over_threshold_instances[i_state] = state_indices[i_state][over_idxs[:, 0]]
np.random.shuffle(over_threshold_instances[i_state]) # shuffle instances
make_syllable_movies(
ims_orig=ims_orig,
state_list=over_threshold_instances,
trial_idxs=trial_idxs,
save_file=save_file,
max_frames=max_frames,
frame_rate=frame_rate,
n_buffer=n_buffer,
n_pre_frames=n_pre_frames,
n_rows=n_rows,
single_syllable=single_syllable)
def make_syllable_movies(
ims_orig, state_list, trial_idxs, save_file=None, max_frames=400, frame_rate=10,
n_buffer=5, n_pre_frames=3, n_rows=None, single_syllable=None):
"""Present video clips of each individual syllable in separate panels
Parameters
----------
ims_orig : :obj:`np.ndarray`
shape (n_frames, n_channels, y_pix, x_pix)
state_list : :obj:`list`
        each entry (one per state) contains all occurrences of that discrete state by
:obj:`[chunk number, starting index, ending index]`
trial_idxs : :obj:`array-like`
indices into :obj:`states` for which trials should be plotted
save_file : :obj:`str`
full save file (path and filename)
max_frames : :obj:`int`, optional
maximum number of frames to animate
frame_rate : :obj:`float`, optional
frame rate of saved movie
n_buffer : :obj:`int`, optional
number of blank frames between syllable instances
n_pre_frames : :obj:`int`, optional
number of behavioral frames to precede each syllable instance
n_rows : :obj:`int` or :obj:`NoneType`, optional
number of rows in output movie
single_syllable : :obj:`int` or :obj:`NoneType`, optional
choose only a single state for movie
"""
K = len(state_list)
# Initialize syllable movie frames
plt.clf()
if single_syllable is not None:
K = 1
fig_width = 5
n_rows = 1
else:
fig_width = 10 # aiming for dim 1 being 10
# get video dims
bs, n_channels, y_dim, x_dim = ims_orig[0].shape
movie_dim1 = n_channels * y_dim
movie_dim2 = x_dim
if n_rows is None:
n_rows = int(np.floor(np.sqrt(K)))
n_cols = int(np.ceil(K / n_rows))
fig_dim_div = movie_dim2 * n_cols / fig_width
fig_width = (movie_dim2 * n_cols) / fig_dim_div
fig_height = (movie_dim1 * n_rows) / fig_dim_div
fig, axes = plt.subplots(n_rows, n_cols, figsize=(fig_width, fig_height))
for i, ax in enumerate(fig.axes):
ax.set_yticks([])
ax.set_xticks([])
if i >= K:
ax.set_axis_off()
elif single_syllable is not None:
ax.set_title('Syllable %i' % single_syllable, fontsize=16)
else:
ax.set_title('Syllable %i' % i, fontsize=16)
fig.tight_layout(pad=0, h_pad=1.005)
imshow_kwargs = {'animated': True, 'cmap': 'gray', 'vmin': 0, 'vmax': 1}
ims = [[] for _ in range(max_frames + bs + 200)]
# Loop through syllables
for i_k, ax in enumerate(fig.axes):
# skip if no syllable in this axis
if i_k >= K:
continue
print('processing syllable %i/%i' % (i_k + 1, K))
# skip if no syllables are longer than threshold
if len(state_list[i_k]) == 0:
continue
if single_syllable is not None:
i_k = single_syllable
i_chunk = 0
i_frame = 0
while i_frame < max_frames:
if i_chunk >= len(state_list[i_k]):
# show blank if out of syllable examples
im = ax.imshow(np.zeros((movie_dim1, movie_dim2)), **imshow_kwargs)
ims[i_frame].append(im)
i_frame += 1
else:
# Get movies/latents
chunk_idx = state_list[i_k][i_chunk, 0]
which_trial = trial_idxs[chunk_idx]
tr_beg = state_list[i_k][i_chunk, 1]
tr_end = state_list[i_k][i_chunk, 2]
batch = ims_orig[which_trial]
movie_chunk = batch[max(tr_beg - n_pre_frames, 0):tr_end]
movie_chunk = np.concatenate(
[movie_chunk[:, j] for j in range(movie_chunk.shape[1])], axis=1)
# if np.sum(states[chunk_idx][tr_beg:tr_end-1] != i_k) > 0:
# raise ValueError('Misaligned states for syllable segmentation')
# Loop over this chunk
for i in range(movie_chunk.shape[0]):
im = ax.imshow(movie_chunk[i], **imshow_kwargs)
ims[i_frame].append(im)
# Add red box if start of syllable
syllable_start = n_pre_frames if tr_beg >= n_pre_frames else tr_beg
if syllable_start <= i < (syllable_start + 2):
rect = matplotlib.patches.Rectangle(
(5, 5), 10, 10, linewidth=1, edgecolor='r', facecolor='r')
im = ax.add_patch(rect)
ims[i_frame].append(im)
i_frame += 1
# Add buffer black frames
for j in range(n_buffer):
im = ax.imshow(np.zeros((movie_dim1, movie_dim2)), **imshow_kwargs)
ims[i_frame].append(im)
i_frame += 1
i_chunk += 1
print('creating animation...', end='')
ani = animation.ArtistAnimation(
fig,
[ims[i] for i in range(len(ims)) if ims[i] != []], interval=20, blit=True, repeat=False)
print('done')
if save_file is not None:
# put together file name
if save_file[-3:] == 'mp4':
save_file = save_file[:-3]
if single_syllable is not None:
state_str = str('_syllable-%02i' % single_syllable)
else:
state_str = ''
save_file += state_str
save_file += '.mp4'
save_movie(save_file, ani, frame_rate=frame_rate)
def real_vs_sampled_wrapper(
output_type, hparams, save_file, sess_idx, dtype='test', conditional=True, max_frames=400,
frame_rate=20, n_buffer=5, xtick_locs=None, frame_rate_beh=None, format='png'):
"""Produce movie with (AE) reconstructed video and sampled video.
This is a high-level function that loads the model described in the hparams dictionary and
produces the necessary state sequences/samples. The sampled video can be completely
unconditional (states and latents are sampled) or conditioned on the most likely state
sequence.
Parameters
----------
output_type : :obj:`str`
'plot' | 'movie' | 'both'
hparams : :obj:`dict`
needs to contain enough information to specify an autoencoder
save_file : :obj:`str`
full save file (path and filename)
sess_idx : :obj:`int`, optional
session index into data generator
dtype : :obj:`str`, optional
types of trials to make plot/video with; 'train' | 'val' | 'test'
conditional : :obj:`bool`
conditional vs unconditional samples; for creating reconstruction title
max_frames : :obj:`int`, optional
maximum number of frames to animate
frame_rate : :obj:`float`, optional
frame rate of saved movie
n_buffer : :obj:`int`
        number of blank frames between animated trials if more than one is needed to reach
:obj:`max_frames`
xtick_locs : :obj:`array-like`, optional
tick locations in bin values for plot
frame_rate_beh : :obj:`float`, optional
behavioral video framerate; to properly relabel xticks
format : :obj:`str`, optional
any accepted matplotlib save format, e.g. 'png' | 'pdf' | 'jpeg'
Returns
-------
:obj:`matplotlib.figure.Figure`
matplotlib figure handle if :obj:`output_type='plot'` or :obj:`output_type='both'`, else
nothing returned (movie is saved)
"""
from behavenet.data.utils import get_transforms_paths
from behavenet.fitting.utils import get_expt_dir
from behavenet.fitting.utils import get_session_dir
# check input - cannot create sampled movies for arhmm-labels models (no mapping from labels to
# frames)
if hparams['model_class'].find('labels') > -1:
if output_type == 'both' or output_type == 'movie':
print('warning: cannot create video with "arhmm-labels" model; producing plots')
output_type = 'plot'
# load latents and states (observed and sampled)
model_output = get_model_latents_states(
hparams, '', sess_idx=sess_idx, return_samples=50, cond_sampling=conditional)
if output_type == 'both' or output_type == 'movie':
# load in AE decoder
if hparams.get('ae_model_path', None) is not None:
ae_model_file = os.path.join(hparams['ae_model_path'], 'best_val_model.pt')
ae_arch = pickle.load(
open(os.path.join(hparams['ae_model_path'], 'meta_tags.pkl'), 'rb'))
else:
hparams['session_dir'], sess_ids = get_session_dir(
hparams, session_source=hparams.get('all_source', 'save'))
hparams['expt_dir'] = get_expt_dir(hparams)
_, latents_file = get_transforms_paths('ae_latents', hparams, sess_ids[sess_idx])
ae_model_file = os.path.join(os.path.dirname(latents_file), 'best_val_model.pt')
ae_arch = pickle.load(
open(os.path.join(os.path.dirname(latents_file), 'meta_tags.pkl'), 'rb'))
print('loading model from %s' % ae_model_file)
ae_model = AE(ae_arch)
ae_model.load_state_dict(
torch.load(ae_model_file, map_location=lambda storage, loc: storage))
ae_model.eval()
n_channels = ae_model.hparams['n_input_channels']
y_pix = ae_model.hparams['y_pixels']
x_pix = ae_model.hparams['x_pixels']
# push observed latents through ae decoder
ims_recon = np.zeros((0, n_channels * y_pix, x_pix))
i_trial = 0
while ims_recon.shape[0] < max_frames:
recon = ae_model.decoding(
torch.tensor(model_output['latents'][dtype][i_trial]).float(), None, None). \
cpu().detach().numpy()
recon = np.concatenate([recon[:, i] for i in range(recon.shape[1])], axis=1)
zero_frames = np.zeros((n_buffer, n_channels * y_pix, x_pix)) # add a few black frames
ims_recon = np.concatenate((ims_recon, recon, zero_frames), axis=0)
i_trial += 1
# push sampled latents through ae decoder
ims_recon_samp = np.zeros((0, n_channels * y_pix, x_pix))
i_trial = 0
while ims_recon_samp.shape[0] < max_frames:
recon = ae_model.decoding(torch.tensor(
model_output['latents_gen'][i_trial]).float(), None, None).cpu().detach().numpy()
recon = np.concatenate([recon[:, i] for i in range(recon.shape[1])], axis=1)
zero_frames = np.zeros((n_buffer, n_channels * y_pix, x_pix)) # add a few black frames
ims_recon_samp = np.concatenate((ims_recon_samp, recon, zero_frames), axis=0)
i_trial += 1
make_real_vs_sampled_movies(
ims_recon, ims_recon_samp, conditional=conditional, save_file=save_file,
frame_rate=frame_rate)
if output_type == 'both' or output_type == 'plot':
i_trial = 0
latents = model_output['latents'][dtype][i_trial][:max_frames]
states = model_output['states'][dtype][i_trial][:max_frames]
latents_samp = model_output['latents_gen'][i_trial][:max_frames]
if not conditional:
states_samp = model_output['states_gen'][i_trial][:max_frames]
else:
states_samp = []
fig = plot_real_vs_sampled(
latents, latents_samp, states, states_samp, save_file=save_file, xtick_locs=xtick_locs,
frame_rate=hparams['frame_rate'] if frame_rate_beh is None else frame_rate_beh,
format=format)
if output_type == 'movie':
return None
elif output_type == 'both' or output_type == 'plot':
return fig
else:
raise ValueError('"%s" is an invalid output_type' % output_type)
def make_real_vs_sampled_movies(
ims_recon, ims_recon_samp, conditional, save_file=None, frame_rate=15):
"""Produce movie with (AE) reconstructed video and sampled video.
Parameters
----------
ims_recon : :obj:`np.ndarray`
shape (n_frames, y_pix, x_pix)
ims_recon_samp : :obj:`np.ndarray`
shape (n_frames, y_pix, x_pix)
conditional : :obj:`bool`
conditional vs unconditional samples; for creating reconstruction title
save_file : :obj:`str`, optional
full save file (path and filename)
frame_rate : :obj:`float`, optional
frame rate of saved movie
"""
n_frames = ims_recon.shape[0]
n_plots = 2
[y_pix, x_pix] = ims_recon[0].shape
fig_dim_div = x_pix * n_plots / 10 # aiming for dim 1 being 10
x_dim = x_pix * n_plots / fig_dim_div
y_dim = y_pix / fig_dim_div
fig, axes = plt.subplots(1, n_plots, figsize=(x_dim, y_dim))
for j in range(2):
axes[j].set_xticks([])
axes[j].set_yticks([])
axes[0].set_title('Real Reconstructions\n', fontsize=16)
if conditional:
title_str = 'Generative Reconstructions\n(Conditional)'
else:
title_str = 'Generative Reconstructions\n(Unconditional)'
axes[1].set_title(title_str, fontsize=16)
fig.tight_layout(pad=0)
im_kwargs = {'cmap': 'gray', 'vmin': 0, 'vmax': 1, 'animated': True}
ims = []
for i in range(n_frames):
ims_curr = []
im = axes[0].imshow(ims_recon[i], **im_kwargs)
ims_curr.append(im)
im = axes[1].imshow(ims_recon_samp[i], **im_kwargs)
ims_curr.append(im)
ims.append(ims_curr)
ani = animation.ArtistAnimation(fig, ims, blit=True, repeat_delay=1000)
save_movie(save_file, ani, frame_rate=frame_rate)
def plot_real_vs_sampled(
latents, latents_samp, states, states_samp, save_file=None, xtick_locs=None,
frame_rate=None, format='png'):
"""Plot real and sampled latents overlaying real and (potentially sampled) states.
Parameters
----------
latents : :obj:`np.ndarray`
shape (n_frames, n_latents)
latents_samp : :obj:`np.ndarray`
shape (n_frames, n_latents)
states : :obj:`np.ndarray`
shape (n_frames,)
states_samp : :obj:`np.ndarray`
shape (n_frames,) if :obj:`latents_samp` are not conditioned on :obj:`states`, otherwise
shape (0,)
save_file : :obj:`str`
full save file (path and filename)
xtick_locs : :obj:`array-like`, optional
tick locations in bin values for plot
frame_rate : :obj:`float`, optional
behavioral video framerate; to properly relabel xticks
format : :obj:`str`, optional
any accepted matplotlib save format, e.g. 'png' | 'pdf' | 'jpeg'
Returns
-------
:obj:`matplotlib.figure.Figure`
matplotlib figure handle
"""
fig, axes = plt.subplots(2, 1, figsize=(10, 8))
# plot observations
axes[0] = plot_states_overlaid_with_latents(
latents, states, ax=axes[0], xtick_locs=xtick_locs, frame_rate=frame_rate)
axes[0].set_xticks([])
axes[0].set_xlabel('')
axes[0].set_title('Inferred latents')
# plot samples
if len(states_samp) == 0:
plot_states = states
title_str = 'Sampled latents'
else:
plot_states = states_samp
title_str = 'Sampled states and latents'
axes[1] = plot_states_overlaid_with_latents(
latents_samp, plot_states, ax=axes[1], xtick_locs=xtick_locs, frame_rate=frame_rate)
axes[1].set_title(title_str)
if save_file is not None:
make_dir_if_not_exists(save_file)
plt.savefig(save_file + '.' + format, dpi=300, format=format)
return fig
def plot_states_overlaid_with_latents(
latents, states, save_file=None, ax=None, xtick_locs=None, frame_rate=None, cmap='tab20b',
format='png'):
"""Plot states for a single trial overlaid with latents.
Parameters
----------
latents : :obj:`np.ndarray`
shape (n_frames, n_latents)
states : :obj:`np.ndarray`
shape (n_frames,)
save_file : :obj:`str`, optional
full save file (path and filename)
ax : :obj:`matplotlib.Axes` object or :obj:`NoneType`, optional
axes to plot into; if :obj:`NoneType`, a new figure is created
xtick_locs : :obj:`array-like`, optional
tick locations in bin values for plot
frame_rate : :obj:`float`, optional
behavioral video framerate; to properly relabel xticks
cmap : :obj:`str`, optional
matplotlib colormap
format : :obj:`str`, optional
any accepted matplotlib save format, e.g. 'png' | 'pdf' | 'jpeg'
Returns
-------
:obj:`matplotlib.figure.Figure`
matplotlib figure handle if :obj:`ax=None`, otherwise updated axis
"""
if ax is None:
fig = plt.figure(figsize=(8, 4))
ax = fig.gca()
else:
fig = None
spc = 1.1 * abs(latents.max())
n_latents = latents.shape[1]
plotting_latents = latents + spc * np.arange(n_latents)
ymin = min(-spc, np.min(plotting_latents))
ymax = max(spc * n_latents, np.max(plotting_latents))
ax.imshow(
states[None, :], aspect='auto', extent=(0, len(latents), ymin, ymax), cmap=cmap,
alpha=1.0)
ax.plot(plotting_latents, '-k', lw=3)
ax.set_ylim([ymin, ymax])
# yticks = spc * np.arange(n_latents)
# ax.set_yticks(yticks[::2])
# ax.set_yticklabels(np.arange(n_latents)[::2])
ax.set_yticks([])
# ax.set_ylabel('Latent')
ax.set_xlabel('Time (bins)')
if xtick_locs is not None:
ax.set_xticks(xtick_locs)
if frame_rate is not None:
ax.set_xticklabels((np.asarray(xtick_locs) / frame_rate).astype('int'))
ax.set_xlabel('Time (sec)')
if save_file is not None:
make_dir_if_not_exists(save_file)
plt.savefig(save_file + '.' + format, dpi=300, format=format)
if fig is None:
return ax
else:
return fig
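

# Minimal usage sketch (not from the original module): synthetic latents and
# discrete states, with shapes chosen only to exercise the function above.
def _demo_states_overlaid_with_latents():
    latents = 0.05 * np.random.randn(500, 4).cumsum(axis=0)
    states = np.repeat(np.arange(5), 100)  # 5 states, 100 frames each
    return plot_states_overlaid_with_latents(latents, states)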
def plot_state_transition_matrix(model, deridge=False):
"""Plot Markov transition matrix for arhmm.
Parameters
----------
model : :obj:`ssm.HMM` object
deridge : :obj:`bool`, optional
remove diagonal to more clearly see dynamic range of off-diagonal entries
Returns
-------
:obj:`matplotlib.figure.Figure`
matplotlib figure handle
"""
trans = np.copy(model.transitions.transition_matrix)
if deridge:
n_states = trans.shape[0]
for i in range(n_states):
trans[i, i] = np.nan
clim = np.nanmax(np.abs(trans))
else:
clim = 1
fig = plt.figure()
plt.imshow(trans, clim=[-clim, clim], cmap='RdBu_r')
plt.colorbar()
plt.ylabel('State (t)')
plt.xlabel('State (t+1)')
plt.title('State transition matrix')
plt.show()
return fig
def plot_dynamics_matrices(model, deridge=False):
"""Plot autoregressive dynamics matrices for arhmm.
Parameters
----------
model : :obj:`ssm.HMM` object
deridge : :obj:`bool`, optional
remove diagonal to more clearly see dynamic range of off-diagonal entries
Returns
-------
:obj:`matplotlib.figure.Figure`
matplotlib figure handle
"""
K = model.K
D = model.D
n_lags = model.observations.lags
if n_lags == 1:
n_cols = 3
fac = 1
elif n_lags == 2:
n_cols = 3
fac = 1 / n_lags
elif n_lags == 3:
n_cols = 3
fac = 1.25 / n_lags
elif n_lags == 4:
n_cols = 3
fac = 1.50 / n_lags
elif n_lags == 5:
n_cols = 2
fac = 1.75 / n_lags
else:
n_cols = 1
fac = 1
n_rows = int(np.ceil(K / n_cols))
fig = plt.figure(figsize=(4 * n_cols, 4 * n_rows * fac))
mats = np.copy(model.observations.As)
if deridge:
for k in range(K):
for d in range(model.D):
mats[k, d, d] = np.nan
clim = np.nanmax(np.abs(mats))
else:
        clim = np.max(np.abs(mats))
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common corruptions to images.
Define 15+4 common image corruptions: Gaussian noise, shot noise, impulse
noise, defocus blur, frosted glass blur, zoom blur, fog, brightness, contrast,
elastic, pixelate, jpeg compression, frost, snow, and motion blur.
4 extra corruptions: gaussian blur, saturate, spatter, and speckle noise.
"""
import io
import subprocess
import tempfile
import numpy as np
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
# To be populated by download_manager
FROST_FILENAMES = []
def _imagemagick_bin():
return 'imagemagick' # pylint: disable=unreachable
# /////////////// Corruption Helpers ///////////////
def around_and_astype(x):
"""Round a numpy array, and convert to uint8.
Args:
x: numpy array.
Returns:
numpy array with dtype uint8.
"""
return np.around(x).astype(np.uint8)
def disk(radius, alias_blur=0.1, dtype=np.float32):
"""Generating a Gaussian blurring kernel with disk shape.
Generating a Gaussian blurring kernel with disk shape using cv2 API.
Args:
radius: integer, radius of blurring kernel.
alias_blur: float, standard deviation of Gaussian blurring.
dtype: data type of kernel
Returns:
cv2 object of the Gaussian blurring kernel.
"""
if radius <= 8:
length = np.arange(-8, 8 + 1)
ksize = (3, 3)
else:
length = np.arange(-radius, radius + 1)
ksize = (5, 5)
x_axis, y_axis = np.meshgrid(length, length)
aliased_disk = np.array((x_axis**2 + y_axis**2) <= radius**2, dtype=dtype)
aliased_disk /= np.sum(aliased_disk)
# supersample disk to antialias
return tfds.core.lazy_imports.cv2.GaussianBlur(
aliased_disk, ksize=ksize, sigmaX=alias_blur)
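

# Usage sketch (illustrative, not part of the original module): blur a random
# single-channel image with the disk kernel, mirroring what defocus_blur does
# per channel below.
def _demo_disk_blur():
  kernel = disk(radius=4, alias_blur=0.5)
  img = np.random.rand(64, 64).astype(np.float32)
  return tfds.core.lazy_imports.cv2.filter2D(img, -1, kernel)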
def clipped_zoom(img, zoom_factor):
"""Zoom image with clipping.
Zoom the central part of the image and clip extra pixels.
Args:
img: numpy array, uncorrupted image.
zoom_factor: numpy array, a sequence of float numbers for zoom factor.
Returns:
numpy array, zoomed image after clipping.
"""
h = img.shape[0]
ch = int(np.ceil(h / float(zoom_factor)))
top_h = (h - ch) // 2
w = img.shape[1]
cw = int(np.ceil(w / float(zoom_factor)))
top_w = (w - cw) // 2
img = tfds.core.lazy_imports.scipy.ndimage.zoom(
img[top_h:top_h + ch, top_w:top_w + cw], (zoom_factor, zoom_factor, 1),
order=1)
# trim off any extra pixels
trim_top_h = (img.shape[0] - h) // 2
trim_top_w = (img.shape[1] - w) // 2
return img[trim_top_h:trim_top_h + h, trim_top_w:trim_top_w + w]
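

# Quick shape check (illustrative): clipped_zoom trims the zoomed image back
# to the input's spatial size.
def _demo_clipped_zoom():
  img = np.random.rand(32, 32, 3).astype(np.float32)
  out = clipped_zoom(img, zoom_factor=1.2)
  assert out.shape == img.shape
  return out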
def plasma_fractal(mapsize=512, wibbledecay=3):
"""Generate a heightmap using diamond-square algorithm.
Modification of the algorithm in
https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py
Args:
mapsize: side length of the heightmap, must be a power of two.
wibbledecay: integer, decay factor.
Returns:
    numpy 2d array, side length 'mapsize', of floats in [0, 1].
"""
if mapsize & (mapsize - 1) != 0:
raise ValueError('mapsize must be a power of two.')
maparray = np.empty((mapsize, mapsize), dtype=np.float_)
maparray[0, 0] = 0
stepsize = mapsize
wibble = 100
def wibbledmean(array):
return array / 4 + wibble * np.random.uniform(-wibble, wibble, array.shape)
def fillsquares():
"""For each square, calculate middle value as mean of points + wibble."""
cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)
squareaccum += np.roll(squareaccum, shift=-1, axis=1)
maparray[stepsize // 2:mapsize:stepsize,
stepsize // 2:mapsize:stepsize] = wibbledmean(squareaccum)
def filldiamonds():
"""For each diamond, calculate middle value as meanof points + wibble."""
mapsize = maparray.shape[0]
drgrid = maparray[stepsize // 2:mapsize:stepsize,
stepsize // 2:mapsize:stepsize]
ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
ldrsum = drgrid + np.roll(drgrid, 1, axis=0)
lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)
ltsum = ldrsum + lulsum
maparray[0:mapsize:stepsize,
stepsize // 2:mapsize:stepsize] = wibbledmean(ltsum)
tdrsum = drgrid + np.roll(drgrid, 1, axis=1)
tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)
ttsum = tdrsum + tulsum
maparray[stepsize // 2:mapsize:stepsize,
0:mapsize:stepsize] = wibbledmean(ttsum)
while stepsize >= 2:
fillsquares()
filldiamonds()
stepsize //= 2
wibble /= wibbledecay
maparray -= maparray.min()
return maparray / maparray.max()
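

# Illustrative check (not part of the original module): the returned heightmap
# is shifted and normalized, so its values span exactly [0, 1].
def _demo_plasma_fractal():
  heightmap = plasma_fractal(mapsize=256, wibbledecay=3)
  assert heightmap.min() == 0.0 and heightmap.max() == 1.0
  return heightmap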
# /////////////// End Corruption Helpers ///////////////
# /////////////// Corruptions ///////////////
def gaussian_noise(x, severity=1):
"""Gaussian noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added Gaussian noise.
"""
c = [.08, .12, 0.18, 0.26, 0.38][severity - 1]
x = np.array(x) / 255.
x_clip = np.clip(x + np.random.normal(size=x.shape, scale=c), 0, 1) * 255
return around_and_astype(x_clip)
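

# Usage sketch on a synthetic image (shape and severity are illustrative):
def _demo_gaussian_noise():
  img = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
  return gaussian_noise(img, severity=3)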
def shot_noise(x, severity=1):
"""Shot noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added shot noise.
"""
c = [60, 25, 12, 5, 3][severity - 1]
x = np.array(x) / 255.
x_clip = np.clip(np.random.poisson(x * c) / float(c), 0, 1) * 255
return around_and_astype(x_clip)
def impulse_noise(x, severity=1):
"""Impulse noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added impulse noise.
"""
c = [.03, .06, .09, 0.17, 0.27][severity - 1]
x = tfds.core.lazy_imports.skimage.util.random_noise(
np.array(x) / 255., mode='s&p', amount=c)
x_clip = np.clip(x, 0, 1) * 255
return around_and_astype(x_clip)
def defocus_blur(x, severity=1):
"""Defocus blurring to images.
Apply defocus blurring to images using Gaussian kernel.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied defocus blur.
"""
c = [(3, 0.1), (4, 0.5), (6, 0.5), (8, 0.5), (10, 0.5)][severity - 1]
x = np.array(x) / 255.
kernel = disk(radius=c[0], alias_blur=c[1])
channels = []
for d in range(3):
channels.append(tfds.core.lazy_imports.cv2.filter2D(x[:, :, d], -1, kernel))
channels = np.array(channels).transpose((1, 2, 0)) # 3x224x224 -> 224x224x3
x_clip = np.clip(channels, 0, 1) * 255
return around_and_astype(x_clip)
def glass_blur(x, severity=1):
"""Frosted glass blurring to images.
Apply frosted glass blurring to images by shuffling pixels locally.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied frosted glass blur.
"""
# sigma, max_delta, iterations
c = [(0.7, 1, 2), (0.9, 2, 1), (1, 2, 3), (1.1, 3, 2),
(1.5, 4, 2)][severity - 1]
x = np.uint8(
tfds.core.lazy_imports.skimage.filters.gaussian(
np.array(x) / 255., sigma=c[0], multichannel=True) * 255)
# locally shuffle pixels
for _ in range(c[2]):
for h in range(x.shape[0] - c[1], c[1], -1):
for w in range(x.shape[1] - c[1], c[1], -1):
dx, dy = np.random.randint(-c[1], c[1], size=(2,))
h_prime, w_prime = h + dy, w + dx
# swap
x[h, w], x[h_prime, w_prime] = x[h_prime, w_prime], x[h, w]
x_clip = np.clip(
tfds.core.lazy_imports.skimage.filters.gaussian(
x / 255., sigma=c[0], multichannel=True), 0, 1)
x_clip *= 255
return around_and_astype(x_clip)
def zoom_blur(x, severity=1):
"""Zoom blurring to images.
Applying zoom blurring to images by zooming the central part of the images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied zoom blur.
"""
c = [
np.arange(1, 1.11, 0.01),
np.arange(1, 1.16, 0.01),
np.arange(1, 1.21, 0.02),
np.arange(1, 1.26, 0.02),
np.arange(1, 1.31, 0.03)
][severity - 1]
x = (np.array(x) / 255.).astype(np.float32)
out = np.zeros_like(x)
for zoom_factor in c:
out += clipped_zoom(x, zoom_factor)
x = (x + out) / (len(c) + 1)
x_clip = np.clip(x, 0, 1) * 255
return around_and_astype(x_clip)
def fog(x, severity=1):
"""Fog corruption to images.
  Adding fog to images. Fog is generated by the diamond-square algorithm.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added fog.
"""
c = [(1.5, 2), (2., 2), (2.5, 1.7), (2.5, 1.5), (3., 1.4)][severity - 1]
x = np.array(x) / 255.
max_val = x.max()
mapsize = 512
shape = x.shape
max_length = max(shape[0], shape[1])
if max_length > mapsize:
mapsize = 2**int(np.ceil(np.log2(float(max_length))))
tmp = plasma_fractal(mapsize=mapsize, wibbledecay=c[1])
tmp = tmp[:x.shape[0], :x.shape[1]]
tmp = tmp[..., np.newaxis]
x += c[0] * tmp
x_clip = np.clip(x * max_val / (max_val + c[0]), 0, 1) * 255
return around_and_astype(x_clip)
def brightness(x, severity=1):
"""Change brightness of images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Changed brightness.
"""
c = [.1, .2, .3, .4, .5][severity - 1]
x = np.array(x) / 255.
x = tfds.core.lazy_imports.skimage.color.rgb2hsv(x)
x[:, :, 2] = np.clip(x[:, :, 2] + c, 0, 1)
x = tfds.core.lazy_imports.skimage.color.hsv2rgb(x)
x_clip = np.clip(x, 0, 1) * 255
return around_and_astype(x_clip)
def contrast(x, severity=1):
"""Change contrast of images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Changed contrast.
"""
c = [0.4, .3, .2, .1, .05][severity - 1]
x = np.array(x) / 255.
means = np.mean(x, axis=(0, 1), keepdims=True)
x_clip = np.clip((x - means) * c + means, 0, 1) * 255
return around_and_astype(x_clip)
def elastic_transform(x, severity=1):
"""Conduct elastic transform to images.
Elastic transform is performed on small patches of the images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied elastic transform.
"""
c = [(244 * 2, 244 * 0.7, 244 * 0.1), (244 * 2, 244 * 0.08, 244 * 0.2),
(244 * 0.05, 244 * 0.01, 244 * 0.02),
(244 * 0.07, 244 * 0.01, 244 * 0.02),
(244 * 0.12, 244 * 0.01, 244 * 0.02)][severity - 1]
image = np.array(x, dtype=np.float32) / 255.
shape = image.shape
shape_size = shape[:2]
# random affine
center_square = np.float32(shape_size) // 2
square_size = min(shape_size) // 3
pts1 = np.float32([
center_square + square_size,
[center_square[0] + square_size, center_square[1] - square_size],
center_square - square_size
])
pts2 = pts1 + np.random.uniform(
-c[2], c[2], size=pts1.shape).astype(np.float32)
affine_trans = tfds.core.lazy_imports.cv2.getAffineTransform(pts1, pts2)
image = tfds.core.lazy_imports.cv2.warpAffine(
image,
affine_trans,
shape_size[::-1],
borderMode=tfds.core.lazy_imports.cv2.BORDER_REFLECT_101)
dx = (tfds.core.lazy_imports.skimage.filters.gaussian(
np.random.uniform(-1, 1, size=shape[:2]),
c[1],
mode='reflect',
truncate=3) * c[0]).astype(np.float32)
dy = (tfds.core.lazy_imports.skimage.filters.gaussian(
np.random.uniform(-1, 1, size=shape[:2]),
c[1],
mode='reflect',
truncate=3) * c[0]).astype(np.float32)
dx, dy = dx[..., np.newaxis], dy[..., np.newaxis]
x, y, z = np.meshgrid(
np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
indices = np.reshape(y + dy,
(-1, 1)), np.reshape(x + dx,
(-1, 1)), np.reshape(z, (-1, 1))
x_clip = np.clip(
tfds.core.lazy_imports.scipy.ndimage.interpolation.map_coordinates(
image, indices, order=1, mode='reflect').reshape(shape), 0, 1) * 255
return around_and_astype(x_clip)
def pixelate(x, severity=1):
"""Pixelate images.
Conduct pixelating corruptions to images by first shrinking the images and
then resizing to original size.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied pixelating
corruption.
"""
c = [0.6, 0.5, 0.4, 0.3, 0.25][severity - 1]
shape = x.shape
x = tfds.core.lazy_imports.PIL_Image.fromarray(x.astype(np.uint8))
x = x.resize((int(shape[1] * c), int(shape[0] * c)))
x = x.resize((shape[1], shape[0]))
return np.asarray(x)
def jpeg_compression(x, severity=1):
"""Conduct jpeg compression to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied jpeg compression.
"""
c = [25, 18, 15, 10, 7][severity - 1]
x = tfds.core.lazy_imports.PIL_Image.fromarray(x.astype(np.uint8))
output = io.BytesIO()
x.save(output, 'JPEG', quality=c)
output.seek(0)
x = tfds.core.lazy_imports.PIL_Image.open(output)
  return np.asarray(x)
import burer_monteiro as bm
import numpy as np
import spectral_gap_analysis as analysis
import aux
import noise_generator as gen
import sdp
if __name__ == '__main__':
n = 10
k = 2
level = 5
    noise_type = 'positive-uniform'  # must match one of the branches below
while True:
z = np.ones(n).reshape((-1, 1))
ground_truth = z.dot(z.T)
if noise_type == 'positive-rows':
N = analysis._gen_row_mat(n, level)
elif noise_type == 'positive-uniform':
N = gen.uniform_noise(n, 10) + 10
N = N - np.diag(np.diag(N))
# Compute spectral gap
gap = aux.laplacian_eigs(N, z)[1]
print('Spectral gap of perturbation: {}'.format(gap))
A = ground_truth + N
# Compute SDP solution for certain matrix
relaxation_val, _, _ = sdp.sdp_relaxation(A, z)
relaxation_val_minus, _, _ = sdp.sdp_relaxation(-A, z)
relaxation_val_N, _, _ = sdp.sdp_relaxation(N, z)
relaxation_val_minus_N, _, _ = sdp.sdp_relaxation(-N, z)
print('sdp(A) = {}'.format(relaxation_val))
print('sdp(-A) = {}'.format(relaxation_val_minus))
print('sdp(N) = {}'.format(relaxation_val_N))
print('sdp(-N) = {}'.format(relaxation_val_minus_N))
        print('Smallest eigenvalue of A = {}'.format(np.min(np.linalg.eigvals(A))))
# Compute BM solution for the problem
Q = bm._vector_to_matrix(bm.trust_region(A, k, plotting=False, printing=False).x, k)
# print(np.linalg.norm(Q.dot(Q.T) - ground_truth))
# Formulate vector on tangent plane
        Q_dot = Q[:, ::-1].copy()  # swap columns; copy so Q itself is not mutated
# Check smallest eigenvalue of the Hessian
left = (np.diag(np.diag((A.dot(Q)).dot(Q.T))) - A).dot(Q_dot) - (np.diag(np.diag((A.dot(Q_dot)).dot(Q.T))) - A).dot(Q)
inner = np.trace((left.reshape((k, n))).dot(Q_dot))
# print(inner)
l = Q_dot - np.diag(np.diag(Q_dot.dot(Q.T))).dot(Q)
inner2 = np.trace((l.reshape((k, n))).dot(Q_dot))
# print(inner - inner2)
mat = (np.diag(np.diag(A.dot(ground_truth)))) - A * ground_truth
# print(np.sort(np.linalg.eigvals(mat)))
# Check for one inequality in Song Mei's paper
Q = Q / np.max(np.linalg.norm(Q, 2, axis=1))
AQ = A.dot(Q)
u = np.random.random_sample((n, k))
        u = u / np.linalg.norm(u, 2)
"""
Created on Mon Mar 7 21:26:41 2016
<EMAIL>
@author: ksxuu
"""
import numpy as np
import contextlib
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib import rc, cm
import matplotlib as mlp
from numpy.ma import masked_array
from matplotlib.colors import Normalize
import matplotlib.gridspec as gridspec
import csv
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from mpl_toolkits.axes_grid1 import make_axes_locatable
plt.rcParams['mathtext.sf'] = 'Arial'
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.sans-serif'] = 'Arial'
fsize = 10
line_width = 0.7
#change the font size of xtick and ytick
plt.rc('xtick',labelsize=14)
plt.rc('ytick', labelsize=14)
# define the function to read a spike train file using csv.reader
def read_csv_file(filename):
"""Reads a CSV file and return it as a list of rows."""
data = []
for row in csv.reader(open(filename),delimiter=','):
        # convert each entry from string to float
        row = [float(x) for x in row]
data.append(row)
return data
Xvals=read_csv_file('Data/Xvals.txt' )
Xvals2=read_csv_file('Data/Xvals2.txt' )
X1vals=np.array([x[0] for x in Xvals])
Yvals=read_csv_file('Data/Yvals.txt' )
Yvals2=read_csv_file('Data/Yvals2.txt' )
Cvals = np.loadtxt('Data/Cvals.txt')
# Code adapted from https://github.com/mrharicot/monodepth, with the following licence
# Copyright © Niantic, Inc. 2018. Patent Pending.
# All rights reserved.
# ================================================================================
# This Software is licensed under the terms of the UCLB ACP-A Licence which allows
# for non-commercial use only. For any other use of the software not covered by
# the terms of this licence, please contact <EMAIL>
# ================================================================================
# UCLB ACP-A Licence
# This Agreement is made by and between the Licensor and the Licensee as defined
# and identified below.
# 1. Definitions.
# In this Agreement (“the Agreement”) the following words shall have the
# following meanings:
# "Authors" shall mean <NAME>, <NAME>, <NAME>
# "Licensee" Shall mean the person or organisation agreeing to use the
# Software in accordance with these terms and conditions.
# "Licensor" shall mean UCL Business PLC whose registered office is at The
# Network Building, 97 Tottenham Court Road, London W1T 4TP. UCL Business is
# the technology transfer arm of University College London (UCL).
# "Owner" shall mean Niantic Inc., a company organised and existing under the
# laws of Delaware, whose principal place of business is at 2 Bryant Street,
# #220, San Francisco, 94105. Owner is a third party beneficiary of this
# Agreement and may enforce its terms as if it were a party to this Agreement.
# "Software" shall mean the MonoDepth Software in source code or object code
# form and any accompanying documentation.
# 2. License.
# 2.1 The Licensor has all necessary rights to grant a licence under: (i)
# copyright and rights in the nature of copyright subsisting in the Software;
# and (ii) patent rights resulting from a patent application filed by the
# Licensor in the United Kingdom in connection with the Software. The Licensor
# grants the Licensee for the duration of this Agreement, a free of charge,
# non-sublicenseable, non-exclusive, non-transferable copyright and patent
# licence (in consequence of said patent application) to use the Software for
# non-commercial purpose only, including teaching and research at educational
# institutions and research at not-for-profit research institutions in
# accordance with the provisions of this Agreement. Non-commercial use
# expressly excludes any profit-making or commercial activities, including
# without limitation sale, licence, manufacture or development of commercial
# products, use in commercially-sponsored research, provision of consulting
# service, use for or on behalf of any commercial entity, and use in research
# where a commercial party obtains rights to research results or any other
# benefit. Any use of the Software for any purpose other than non-commercial
# research shall automatically terminate this Licence.
# 2.2 The Licensee is permitted to make modifications to the Software provided
# that distribution of such modifications is in accordance with Clause 3.
# 2.3 Except as expressly permitted by this Agreement and save to the extent
# and in the circumstances expressly required to be permitted by law, the
# Licensee is not permitted to rent, lease, sell, offer to sell or loan the
# Software or its associated documentation.
# 3. Redistribution and modifications
# 3.1 The Licensee may reproduce and distribute copies of the Software only to
# this same GitHub repository with or without modifications, in source format
# only and provided that any and every distribution is accompanied by an
# unmodified copy of this Licence and that the following copyright notice is
# always displayed in an obvious manner: Copyright © Niantic, Inc. 2018. All
# rights reserved.
# 3.2 In the case where the Software has been modified, any distribution must
# include prominent notices indicating which files have been changed.
# 3.3 The Licensee shall cause any work that it distributes or publishes, that
# in whole or in part contains or is derived from the Software or any part
# thereof (“Work based on the Software”), to be licensed as a whole at no
# charge to all third parties under the terms of this Licence.
# 4. Duration.
# This Agreement is effective until the Licensee terminates it by destroying
# the Software and its documentation together with all copies. It will also
# terminate automatically if the Licensee fails to abide by its terms. Upon
# automatic termination the Licensee agrees to destroy all copies of the
# Software and its documentation.
# 5. Disclaimer of Warranties.
# The Software is provided as is. To the maximum extent permitted by law,
# Licensor provides no warranties or conditions of any kind, either express or
# implied, including without limitation, any warranties or condition of title,
# non-infringement or fitness for a particular purpose.
# 6. Limitation of Liability.
# In no event shall the Licensor and/or Authors be liable for any direct,
# indirect, incidental, special, exemplary or consequential damages (including
# but not limited to, procurement of substitute goods or services; loss of
# use, data or profits; or business interruption) however caused and on any
# theory of liability, whether in contract, strict liability, or tort
# (including negligence or otherwise) arising in any way out of the use of
# this Software, even if advised of the possibility of such damage.
# 7. Indemnity.
# The Licensee shall indemnify the Licensor and/or Authors against all third
# party claims that may be asserted against or suffered by the Licensor and/or
# Authors and which relate to use of the Software by the Licensee or the
# Recipient.
# 8. Intellectual Property.
# 8.1 As between the Licensee and Licensor, copyright and all other
# intellectual property rights subsisting in or in connection with the
# Software and supporting information shall remain at all times the property
# of the Licensor but Licensee acknowledges and agrees that Owner is the owner
# of all right, title and interest in and to the Software. The Licensee shall
# acquire no rights in any such material except as expressly provided in this
# Agreement.
# 8.2 No permission is granted to use the trademarks or product names of the
# Licensor or Owner except as required for reasonable and customary use in
# describing the origin of the Software and for the purposes of abiding by the
# terms of Clause 3.1.
# 8.3 The Licensee shall promptly notify the Licensor, in sufficient detail,
# all improvements and new uses of the Software (“Improvements”). The Licensor
# and its affiliates shall have a non-exclusive, fully paid-up, royalty-free,
# irrevocable and perpetual licence under the Improvements for non-commercial
# academic research and teaching purposes.
# 8.4 The Licensee grants an exclusive first option to the Owner to be
# exercised by the Owner within three (3) years of the date of notification of
# the Improvements under Clause 8.3 to use any Improvements for commercial
# purposes on terms to be negotiated and agreed by Licensee and Owner in good
# faith within a period of six (6) months from the date of exercise of the
# said option (including without limitation any royalty share in net income
# from such commercialization payable to the Licensee, as the case may be).
# 9. Acknowledgements.
# The Licensee shall acknowledge the Authors and use of the Software in the
# publication of any work that uses, or results that are achieved through, the
# use of the Software. The following citation shall be included in the
# acknowledgement: “Unsupervised Monocular Depth Estimation with Left-Right
# Consistency, by <NAME>, <NAME>, <NAME>, CVPR 2017.”
# 10. Governing Law.
# This Agreement shall be governed by, construed and interpreted in accordance
# with English law and the parties submit to the exclusive jurisdiction of the
# English courts.
# 11. Termination.
# Upon termination of this Agreement, the licenses granted hereunder will
# terminate and Sections 5, 6, 7, 8, 9, 10 and 11 shall survive any
# termination of this Agreement.
import torch
from torch.utils.data import Dataset
import numpy as np
from scipy.misc import imread, imresize
from collections import Counter
from scipy.interpolate import LinearNDInterpolator as LinearNDInterpolator
import extract_depth
def read_calib_file(path):
# taken from https://github.com/hunse/kitti
float_chars = set("0123456789.e+- ")
data = {}
with open(path, 'r') as f:
for line in f.readlines():
key, value = line.split(':', 1)
value = value.strip()
data[key] = value
if float_chars.issuperset(value):
# try to cast to float array
try:
data[key] = np.array(list(map(float, value.split(' '))))
except ValueError:
# casting error: data[key] already eq. value, so pass
pass
return data
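

# Usage sketch (path is a placeholder): parse a KITTI calibration file into a
# dict mapping keys to float arrays where possible.
# calib = read_calib_file('/path/to/calib_cam_to_cam.txt')
# P2 = calib['P_rect_02'].reshape(3, 4)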
def load_velodyne_points(file_name):
# adapted from https://github.com/hunse/kitti
points = np.fromfile(file_name, dtype=np.float32).reshape(-1, 4)
points[:, 3] = 1.0 # homogeneous
return points
def lin_interp(shape, xyd):
# taken from https://github.com/hunse/kitti
m, n = shape
ij, d = xyd[:, 1::-1], xyd[:, 2]
f = LinearNDInterpolator(ij, d, fill_value=0)
J, I = np.meshgrid(np.arange(n), np.arange(m))
IJ = np.vstack([I.flatten(), J.flatten()]).T
disparity = f(IJ).reshape(shape)
return disparity
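

# Sketch (synthetic points, not from the original module): densify a few
# scattered (x, y, depth) samples, as get_depth does when interp=True.
def _demo_lin_interp():
    xyd = np.array([[10.0, 5.0, 2.0], [40.0, 30.0, 8.0], [70.0, 60.0, 4.0]])
    return lin_interp((64, 80), xyd)  # dense (64, 80) map, 0 outside the hull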
def sub2ind(matrixSize, rowSub, colSub):
m, n = matrixSize
return rowSub * (n-1) + colSub - 1
def get_focal_length_baseline(calib_dir, cam):
cam2cam = read_calib_file(calib_dir + '/calib_cam_to_cam.txt')
P2_rect = cam2cam['P_rect_02'].reshape(3,4)
P3_rect = cam2cam['P_rect_03'].reshape(3,4)
    # cam 2 is 6cm to the left of camera 0
    # cam 3 is 54cm to the right
b2 = P2_rect[0,3] / -P2_rect[0,0]
b3 = P3_rect[0,3] / -P3_rect[0,0]
baseline = b3-b2
if cam==2:
focal_length = P2_rect[0,0]
elif cam==3:
focal_length = P3_rect[0,0]
return focal_length, baseline
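

# Sketch (hypothetical helper; assumes a pixel-disparity map `disp` for camera
# `cam`): depth and disparity relate through depth = focal * baseline / disp.
def _demo_depth_from_disparity(calib_dir, disp, cam=2):
    focal_length, baseline = get_focal_length_baseline(calib_dir, cam)
    return focal_length * baseline / np.maximum(disp, 1e-6)  # guard zeros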
def get_depth(calib_dir, velo_file_name, im_shape, cam=2, interp=False, vel_depth=False, inv_depth=False):
# load calibration files
cam2cam = read_calib_file(calib_dir + '/calib_cam_to_cam.txt')
velo2cam = read_calib_file(calib_dir + '/calib_velo_to_cam.txt')
velo2cam = np.hstack((velo2cam['R'].reshape(3,3), velo2cam['T'][..., np.newaxis]))
velo2cam = np.vstack((velo2cam, np.array([0, 0, 0, 1.0])))
# compute projection matrix velodyne->image plane
R_cam2rect = np.eye(4)
R_cam2rect[:3,:3] = cam2cam['R_rect_00'].reshape(3,3)
P_rect = cam2cam['P_rect_0'+str(cam)].reshape(3,4)
    P_velo2im = np.dot(np.dot(P_rect, R_cam2rect), velo2cam)
import numpy as np
from numpy.linalg import inv
from numpy.linalg import norm
from joblib import Parallel, delayed
from multiprocessing import Process, Manager, cpu_count, Pool
class SolveIndividual:
def solve(self, A, b, nu, rho, Z):
t1 = A.dot(A.T)
A = A.reshape(-1, 1)
tX = (A * b + rho * Z - nu) / (t1 + rho)
return tX
class CombineSolution:
def combine(self, nuBar, xBar, Z, rho):
t = nuBar.reshape(-1, 1)
t = t + rho * (xBar.reshape(-1, 1) - Z)
return t.T
class ADMM:
def __init__(self, A, b, parallel=False):
self.D = A.shape[1]
self.N = A.shape[0]
if parallel:
self.XBar = np.zeros((self.N, self.D))
self.nuBar = np.zeros((self.N, self.D))
self.nu = np.zeros((self.D, 1))
self.rho = 1
self.X = np.random.randn(self.D, 1)
self.Z = np.zeros((self.D, 1))
self.A = A
self.b = b
self.alpha = 0.01
self.parallel = parallel
self.numberOfThreads = cpu_count()
def step(self):
if self.parallel:
return self.step_parallel()
# Solve for X_t+1
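        # Closed form of argmin_x 0.5*||Ax - b||^2 + (rho/2)*||x - Z + nu/rho||^2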
        self.X = inv(self.A.T.dot(self.A) + self.rho * np.eye(self.D)).dot(
            self.A.T.dot(self.b) + self.rho * self.Z - self.nu)
# Solve for Z_t+1
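        # Approximate soft-thresholding (prox of alpha*||.||_1), using the sign
        # of the previous Z rather than of X + nu/rho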
self.Z = self.X + self.nu / self.rho - (self.alpha / self.rho) * np.sign(self.Z)
# Combine
self.nu = self.nu + self.rho * (self.X - self.Z)
def solveIndividual(self, i):
solve = SolveIndividual()
return solve.solve(self.A[i], np.asscalar(self.b[i]), self.nuBar[i].reshape(-1, 1), self.rho, self.Z)
def combineSolution(self, i):
combine = CombineSolution()
return combine.combine(self.nuBar[i].reshape(-1, 1), self.XBar[i].reshape(-1, 1), self.Z, self.rho)
def step_parallel(self):
# Solve for X_t+1
# Parallel(n_jobs = self.numberOfThreads, backend = "threading")(
# delayed(self.solveIndividual)(i) for i in range(0, self.N-1))
process = []
for i in range(0, self.N - 1):
p = Process(target=self.solveIndividual, args=(i,))
p.start()
process.append(p)
for p in process:
p.join()
self.X = np.average(self.XBar, axis=0)
self.nu = np.average(self.nuBar, axis=0)
self.X = self.X.reshape(-1, 1)
self.nu = self.nu.reshape(-1, 1)
# Solve for Z_t+1
self.Z = self.X + self.nu / self.rho - (self.alpha / self.rho) * np.sign(self.Z)
# Combine
# Parallel(n_jobs = self.numberOfThreads, backend = "threading")(
# delayed(self.combineSolution)(i) for i in range(0, self.N-1))
process = []
for i in range(0, self.N - 1):
p = Process(target=self.combineSolution, args=(i,))
p.start()
process.append(p)
for p in process:
p.join()
def step_iterative(self):
# Solve for X_t+1
for i in range(0, self.N - 1):
t = self.solveIndividual(i)
self.XBar[i] = t.T
self.X = np.average(self.XBar, axis=0)
self.nu = np.average(self.nuBar, axis=0)
self.X = self.X.reshape(-1, 1)
self.nu = self.nu.reshape(-1, 1)
# Solve for Z_t+1
self.Z = self.X + self.nu / self.rho - (self.alpha / self.rho) * np.sign(self.Z)
# Combine
for i in range(0, self.N - 1):
t = self.nuBar[i].reshape(-1, 1)
t = t + self.rho * (self.XBar[i].reshape(-1, 1) - self.Z)
self.nuBar[i] = t.T
def LassoObjective(self):
return 0.5 * norm(self.A.dot(self.X) - self.b) ** 2 + self.alpha * norm(self.X, 1)
def main():
num_iterations = 20
N = 100
D = 20
    A = np.random.randn(N, D)
"""
Tests available cost function classes in FitBenchmarking.
"""
from unittest import TestCase
import numpy as np
from fitbenchmarking.cost_func.cost_func_factory import create_cost_func
from fitbenchmarking.cost_func.hellinger_nlls_cost_func import \
HellingerNLLSCostFunc
from fitbenchmarking.cost_func.nlls_cost_func import NLLSCostFunc
from fitbenchmarking.cost_func.poisson_cost_func import (PoissonCostFunc,
_safe_a_log_b)
from fitbenchmarking.cost_func.weighted_nlls_cost_func import \
WeightedNLLSCostFunc
from fitbenchmarking.hessian.analytic_hessian import Analytic
from fitbenchmarking.jacobian.scipy_jacobian import Scipy
from fitbenchmarking.parsing.fitting_problem import FittingProblem
from fitbenchmarking.utils import exceptions
from fitbenchmarking.utils.options import Options
# pylint: disable=attribute-defined-outside-init
def fun(x, p):
"""
Analytic function evaluation
"""
return (x*p**2)**2
def jac(x, p):
"""
Analytic Jacobian evaluation
"""
return np.column_stack((4*x**2*p[0]**3,
4*x**2*p[0]**3))
def hes(x, p):
"""
Analytic Hessian evaluation
"""
return np.array([[12*x**2*p[0]**2, 12*x**2*p[0]**2],
[12*x**2*p[0]**2, 12*x**2*p[0]**2], ])
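

# Derivation note for the analytic derivatives above: with
# f(x, p) = (x*p**2)**2 = x**2 * p**4 (taking p = p[0]),
# df/dp = 4*x**2*p**3 and d2f/dp2 = 12*x**2*p**2, which is what jac and hes
# encode; values are duplicated across both columns because the test problem
# has two parameters that enter identically.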
class TestNLLSCostFunc(TestCase):
"""
Class to test the NLLSCostFunc class
"""
def setUp(self):
"""
Setting up nonlinear least squares cost function tests
"""
self.options = Options()
fitting_problem = FittingProblem(self.options)
self.cost_function = NLLSCostFunc(fitting_problem)
fitting_problem.function = lambda x, p1: x + p1
self.x_val = np.array([1, 8, 11])
self.y_val = np.array([6, 10, 20])
def test_eval_r_raise_error(self):
"""
        Test that eval_r raises an error
"""
self.assertRaises(exceptions.CostFuncError,
self.cost_function.eval_r,
params=[1, 2, 3],
x=[2],
y=[3, 4])
def test_eval_r_correct_evaluation(self):
"""
Test that eval_r is running the correct function
"""
eval_result = self.cost_function.eval_r(x=self.x_val,
y=self.y_val,
params=[5])
self.assertTrue(all(eval_result == np.array([0, -3, 4])))
def test_eval_cost(self):
"""
Test that eval_cost is correct
"""
eval_result = self.cost_function.eval_cost(params=[5],
x=self.x_val,
y=self.y_val)
self.assertEqual(eval_result, 25)
def test_validate_algorithm_type_error(self):
"""
Test that validate_algorithm_type raises an error
for incompatible options
"""
self.cost_function.invalid_algorithm_types = ['ls']
algorithm_check = {'ls': ['ls-min']}
minimizer = 'ls-min'
self.assertRaises(exceptions.IncompatibleMinimizerError,
self.cost_function.validate_algorithm_type,
algorithm_check=algorithm_check,
minimizer=minimizer)
def test_validate_algorithm_type_correct(self):
"""
Test that validate_algorithm_type does not raise
        an error for compatible options
"""
self.cost_function.invalid_algorithm_types = []
algorithm_check = {'ls': ['ls-min']}
minimizer = 'ls-min'
self.cost_function.validate_algorithm_type(algorithm_check, minimizer)
def test_jac_res(self):
"""
Test that jac_res works for the NLLs cost function
"""
jacobian = Scipy(self.cost_function.problem)
jacobian.method = "2-point"
self.cost_function.jacobian = jacobian
J = self.cost_function.jac_res(params=[5],
x=self.x_val,
y=self.y_val)
expected = np.array([[-1.0], [-1.0], [-1.0]])
self.assertTrue(np.allclose(J, expected))
def test_jac_cost(self):
"""
Test that jac_cost works for the NLLs cost function
"""
jacobian = Scipy(self.cost_function.problem)
jacobian.method = "2-point"
self.cost_function.jacobian = jacobian
jac_cost = self.cost_function.jac_cost(params=[5],
x=self.x_val,
y=self.y_val)
expected = np.array([-2.0])
self.assertTrue(np.allclose(jac_cost, expected))
def test_hes_res(self):
"""
Test that hes_res works for the NLLs cost function
"""
self.cost_function.problem.function = fun
self.cost_function.problem.jacobian = jac
self.cost_function.problem.hessian = hes
jacobian = Scipy(self.cost_function.problem)
jacobian.method = "2-point"
self.cost_function.jacobian = jacobian
hessian = Analytic(self.cost_function.problem,
self.cost_function.jacobian)
self.cost_function.hessian = hessian
H, _ = self.cost_function.hes_res(params=[5],
x=self.x_val,
y=self.y_val)
expected = np.array([[[-300, -19200, -36300],
[-300, -19200, -36300]],
[[-300, -19200, -36300],
[-300, -19200, -36300]]])
self.assertTrue(np.allclose(H, expected))
def test_hes_cost(self):
"""
Test that hes_cost works for the NLLs cost function
"""
self.cost_function.problem.function = fun
self.cost_function.problem.jacobian = jac
self.cost_function.problem.hessian = hes
jacobian = Scipy(self.cost_function.problem)
jacobian.method = "2-point"
self.cost_function.jacobian = jacobian
hessian = Analytic(self.cost_function.problem,
self.cost_function.jacobian)
self.cost_function.hessian = hessian
hes_cost = self.cost_function.hes_cost(params=[0.01],
x=self.x_val,
y=self.y_val)
expected = np.array([[-7.35838895, -7.35838895],
[-7.35838895, -7.35838895]])
self.assertTrue(np.allclose(hes_cost, expected))
class TestWeightedNLLSCostFunc(TestCase):
"""
Class to test the WeightedNLLSCostFunc class
"""
def setUp(self):
"""
Setting up weighted nonlinear least squares cost function tests
"""
self.options = Options()
fitting_problem = FittingProblem(self.options)
self.cost_function = WeightedNLLSCostFunc(fitting_problem)
fitting_problem.function = lambda x, p1: x + p1
self.x_val = np.array([1, 8, 11])
self.y_val = np.array([6, 10, 20])
self.e_val = np.array([2, 4, 1])
def test_eval_r_raise_error(self):
"""
Test that eval_r raises and error
"""
self.assertRaises(exceptions.CostFuncError,
self.cost_function.eval_r,
params=[1, 2, 3],
x=[2],
y=[3, 4, 5],
e=[23, 4])
def test_eval_r_correct_evaluation(self):
"""
Test that eval_r is running the correct function
"""
eval_result = self.cost_function.eval_r(x=self.x_val,
y=self.y_val,
e=self.e_val,
params=[5])
self.assertTrue(all(eval_result == np.array([0, -0.75, 4])))
def test_eval_cost(self):
"""
Test that eval_cost is correct
"""
eval_result = self.cost_function.eval_cost(params=[5],
x=self.x_val,
y=self.y_val,
e=self.e_val)
self.assertEqual(eval_result, 16.5625)
def test_jac_res(self):
"""
Test that jac_res works for the Weighted NLLs cost function
"""
jacobian = Scipy(self.cost_function.problem)
jacobian.method = "2-point"
self.cost_function.jacobian = jacobian
J = self.cost_function.jac_res(params=[5],
x=self.x_val,
y=self.y_val,
e=self.e_val)
expected = np.array([[-0.5], [-0.25], [-1.0]])
self.assertTrue(np.allclose(J, expected))
def test_hes_res(self):
"""
Test that hes_res works for the Weighted NLLs cost function
"""
self.cost_function.problem.function = fun
self.cost_function.problem.jacobian = jac
self.cost_function.problem.hessian = hes
jacobian = Scipy(self.cost_function.problem)
jacobian.method = "2-point"
self.cost_function.jacobian = jacobian
hessian = Analytic(self.cost_function.problem,
self.cost_function.jacobian)
self.cost_function.hessian = hessian
H, _ = self.cost_function.hes_res(params=[5],
x=self.x_val,
y=self.y_val,
e=self.e_val)
expected = np.array([[[-150, -4800, -36300],
[-150, -4800, -36300]],
[[-150, -4800, -36300],
[-150, -4800, -36300]]])
self.assertTrue(np.allclose(H, expected))
class TestHellingerNLLSCostFunc(TestCase):
"""
Class to test the HellingerNLLSCostFunc class
"""
def setUp(self):
"""
Setting up root nonlinear least squares cost function tests
"""
self.options = Options()
fitting_problem = FittingProblem(self.options)
self.cost_function = HellingerNLLSCostFunc(fitting_problem)
fitting_problem.function = lambda x, p1: x + p1
self.x_val = np.array([1, 8, 11])
self.y_val = np.array([6, 10, 20])
def test_eval_r_raise_error(self):
"""
Test that eval_r raises and error
"""
self.assertRaises(exceptions.CostFuncError,
self.cost_function.eval_r,
params=[1, 2, 3],
x=[2],
y=[3, 4, 5])
def test_eval_r_correct_evaluation(self):
"""
Test that eval_r is running the correct function
"""
eval_result = self.cost_function.eval_r(x=self.x_val,
y=self.y_val,
params=[0])
expected = np.array([1.4494897427831779,
0.33385053542218923,
1.1555111646441798])
self.assertTrue(
all(eval_result == expected))
def test_eval_cost(self):
"""
Test that eval_cost is correct
"""
eval_result = self.cost_function.eval_cost(params=[5],
x=self.x_val,
y=self.y_val)
self.assertEqual(eval_result, 0.4194038580206052)
def test_jac_res(self):
"""
Test that jac_res works for the Hellinger NLLs cost function
"""
jacobian = Scipy(self.cost_function.problem)
jacobian.method = "2-point"
self.cost_function.jacobian = jacobian
J = self.cost_function.jac_res(params=[5],
x=self.x_val,
y=self.y_val)
expected = np.array([[-0.20412415],
[-0.13867504],
[-0.125]])
self.assertTrue(np.allclose(J, expected))
def test_hes_res(self):
"""
Test that hes_res works for the Hellinger NLLs cost function
"""
self.cost_function.problem.function = fun
self.cost_function.problem.jacobian = jac
self.cost_function.problem.hessian = hes
jacobian = Scipy(self.cost_function.problem)
jacobian.method = "2-point"
self.cost_function.jacobian = jacobian
hessian = Analytic(self.cost_function.problem,
self.cost_function.jacobian)
self.cost_function.hessian = hessian
H, _ = self.cost_function.hes_res(params=[5],
x=self.x_val,
y=self.y_val)
expected = np.array([[[-1, -15, -21],
[-1, -15, -21]],
[[-1, -15, -21],
[-1, -15, -21]]])
self.assertTrue(np.allclose(H, expected))
class TestPoissonCostFunc(TestCase):
"""
Class to test the PoissonCostFunc class
"""
def setUp(self):
"""
Setting up poisson cost function tests
"""
self.options = Options()
fitting_problem = FittingProblem(self.options)
self.cost_function = PoissonCostFunc(fitting_problem)
fitting_problem.function = lambda x, p1: x + p1
self.x_val = np.array([1, 8, 11]) # 6, 13, 16
self.y_val = np.array([6, 10, 20])
def test_eval_cost_raise_error(self):
"""
Test that eval_cost raises an error if inputs are bad.
"""
with self.assertRaises(exceptions.CostFuncError):
_ = self.cost_function.eval_cost(
params=[5],
x=[2],
y=[1, 3, 5])
def test_eval_cost_correct(self):
"""
Test that the eval cost function returns the correct value
"""
eval_result = self.cost_function.eval_cost(params=[5],
x=self.x_val,
y=self.y_val)
# 6*(log(6) - log(6))
# + 10*(log(10) - log(13))
# + 20*(log(20) - log(16))
# - (6 - 6) - (10 - 13) - (20 - 16)
# == 30*log(5) - 10*log(13) - 30*log(2) - 1
self.assertEqual(eval_result, 0.8392283816092849)
def test_safe_a_log_b(self):
"""
Test the safe_a_log_b function.
"""
a = np.array([1, 2, 3, 0, 5])
b = np.array([1, 2, 3, 4, 5])
res = _safe_a_log_b(a, b)
self.assertTrue(np.isclose(res,
np.array([0.0,
2*np.log(2),
3*np.log(3),
0.0,
5*np.log(5)])
).all())
def test_jac_res(self):
"""
Test that jac_res works for the Poisson cost function
"""
jacobian = Scipy(self.cost_function.problem)
jacobian.method = "2-point"
self.cost_function.jacobian = jacobian
J = self.cost_function.jac_res(params=[5],
x=self.x_val,
y=self.y_val)
expected = np.array([[0.0],
[0.23076923],
[-0.25]])
self.assertTrue(np.allclose(J, expected))
def test_hes_res(self):
"""
Test that hes_res works for the Poisson NLLs cost function
"""
self.cost_function.problem.function = fun
self.cost_function.problem.jacobian = jac
self.cost_function.problem.hessian = hes
jacobian = Scipy(self.cost_function.problem)
jacobian.method = "2-point"
self.cost_function.jacobian = jacobian
hessian = Analytic(self.cost_function.problem,
self.cost_function.jacobian)
self.cost_function.hessian = hessian
H, _ = self.cost_function.hes_res(params=[5],
x=self.x_val,
y=self.y_val)
expected = np.array([[[300, 19201, 36303],
[300, 19201, 36303]],
[[300, 19201, 36303],
[300, 19201, 36303]]])
        self.assertTrue(np.allclose(H, expected))
# -*- coding: utf-8 -*-
import unittest
import numpy
import clpy
from clpy.backend.opencl.clblast import clblast
from clpy.core import core
import functools
def for_each_dtype_and_blasfunc_pair(pairs):
def decorator(impl):
@functools.wraps(impl)
def test_func(self, *args, **kwargs):
for pair in pairs:
try:
kwargs['dtype'] = numpy.dtype(pair[0]).type
kwargs['blasfunc'] = pair[1]
impl(self, *args, **kwargs)
except Exception:
print('dtype:', pair[0], ", blasfunc:", pair[1])
raise
return test_func
return decorator
GEMM_pairs = [
('float32', clblast.sgemm),
('float64', clblast.dgemm),
]
class TestBlas3GEMM(unittest.TestCase):
"""test class of CLBlast BLAS-3 GEMM functions"""
@for_each_dtype_and_blasfunc_pair(GEMM_pairs)
def test_row_matrix_row_matrix(self, dtype, blasfunc):
npA = numpy.array([
[1, 2, 3],
[4, 5, 6]], dtype=dtype) # row-major
npB = numpy.array([
[10, 11],
[13, 14],
[16, 17]], dtype=dtype) # row-major
transa = 0 # A is not transposed in c-style(row-major)
transb = 0 # B is not transposed in c-style(row-major)
m = npA.shape[0] # op(A) rows = (A in row-major) rows = C rows
n = npB.shape[1] # op(B) cols = (B in row-major) cols = C cols
k = npA.shape[1] # op(A) cols = (A in row-major) cols = op(B) rows
clpA = clpy.ndarray(npA.shape, dtype=dtype)
clpA.set(npA)
clpB = clpy.ndarray(npB.shape, dtype=dtype)
clpB.set(npB)
clpC = clpy.ndarray((n, m), dtype=dtype) # column-major
clpA, transa, lda = core._mat_to_cublas_contiguous(
clpA, transa) # as cublas-style
clpB, transb, ldb = core._mat_to_cublas_contiguous(
clpB, transb) # as cublas-style
ldc = clpC.shape[1]
blasfunc('C', transa, transb, m, n, k,
1.0, clpA, lda,
clpB, ldb,
0.0, clpC, ldc
)
actualC = clpC.get().T # as row-major
        expectedC = numpy.dot(npA, npB)  # row-major calculation
self.assertTrue(numpy.allclose(expectedC, actualC))
@for_each_dtype_and_blasfunc_pair(GEMM_pairs)
def test_row_matrix_row_vector(self, dtype, blasfunc):
npA = numpy.array([
[1, 2],
[4, 5],
[7, 8]], dtype=dtype) # row-major
npB = numpy.array([
[10],
[13]], dtype=dtype) # row-major
transa = 0 # A is not transposed in c-style(row-major)
transb = 0 # B is not transposed in c-style(row-major)
m = npA.shape[0] # op(A) rows = (A in row-major) rows = C rows
n = npB.shape[1] # op(B) cols = (B in row-major) cols = C cols
k = npA.shape[1] # op(A) cols = (A in row-major) cols = op(B) rows
clpA = clpy.ndarray(npA.shape, dtype=dtype)
clpA.set(npA)
clpB = clpy.ndarray(npB.shape, dtype=dtype)
clpB.set(npB)
clpC = clpy.ndarray((n, m), dtype=dtype) # column-major
clpA, transa, lda = core._mat_to_cublas_contiguous(
clpA, transa) # as cublas-style
clpB, transb, ldb = core._mat_to_cublas_contiguous(
clpB, transb) # as cublas-style
ldc = clpC.shape[1]
blasfunc('C', transa, transb, m, n, k,
1.0, clpA, lda,
clpB, ldb,
0.0, clpC, ldc
)
actualC = clpC.get().T # as row-major
        expectedC = numpy.dot(npA, npB)  # row-major calculation
self.assertTrue(numpy.allclose(expectedC, actualC))
@for_each_dtype_and_blasfunc_pair(GEMM_pairs)
def test_row_vector_row_matrix(self, dtype, blasfunc):
npA = numpy.array([
[10, 13, 16]
], dtype=dtype) # row-major
npB = numpy.array([
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]], dtype=dtype) # row-major
transa = 0 # A is not transposed in c-style(row-major)
transb = 0 # B is not transposed in c-style(row-major)
m = npA.shape[0] # op(A) rows = (A in row-major) rows = C rows
n = npB.shape[1] # op(B) cols = (B in row-major) cols = C cols
k = npA.shape[1] # op(A) cols = (A in row-major) cols = op(B) rows
clpA = clpy.ndarray(npA.shape, dtype=dtype)
clpA.set(npA)
clpB = clpy.ndarray(npB.shape, dtype=dtype)
clpB.set(npB)
clpC = clpy.ndarray((n, m), dtype=dtype) # column-major
clpA, transa, lda = core._mat_to_cublas_contiguous(
clpA, transa) # as cublas-style
clpB, transb, ldb = core._mat_to_cublas_contiguous(
clpB, transb) # as cublas-style
ldc = clpC.shape[1]
blasfunc('C', transa, transb, m, n, k,
1.0, clpA, lda,
clpB, ldb,
0.0, clpC, ldc
)
actualC = clpC.get().T # as row-major
        expectedC = numpy.dot(npA, npB)  # row-major calculation
self.assertTrue(numpy.allclose(expectedC, actualC))
@for_each_dtype_and_blasfunc_pair(GEMM_pairs)
def test_column_matrix_column_matrix(self, dtype, blasfunc):
npA = numpy.array([
[1, 2, 3],
[4, 5, 6]], dtype=dtype) # column-major
# 1 4
# 2 5
# 3 6
npB = numpy.array([
[10, 11],
[13, 14],
[16, 17]], dtype=dtype) # column-major
# 10 13 16
# 11 14 17
transa = 1 # A is transposed in c-style(row-major)
transb = 1 # B is transposed in c-style(row-major)
m = npA.shape[1] # op(A) rows = (A in column-major) rows = C rows
n = npB.shape[0] # op(B) cols = (B in column-major) cols = C cols
k = npA.shape[0] # op(A) cols = (A in column-major) cols = op(B) rows
clpA = clpy.ndarray(npA.shape, dtype=dtype)
clpA.set(npA)
clpB = clpy.ndarray(npB.shape, dtype=dtype)
clpB.set(npB)
clpC = clpy.ndarray((n, m), dtype=dtype) # column-major
clpA, transa, lda = core._mat_to_cublas_contiguous(
clpA, transa) # as cublas-style
clpB, transb, ldb = core._mat_to_cublas_contiguous(
clpB, transb) # as cublas-style
ldc = clpC.shape[1]
blasfunc('C', transa, transb, m, n, k,
1.0, clpA, lda,
clpB, ldb,
0.0, clpC, ldc
)
actualC = clpC.get().T # as row-major
        expectedC = numpy.dot(npA.T, npB.T)  # row-major calculation
self.assertTrue(numpy.allclose(expectedC, actualC))
@for_each_dtype_and_blasfunc_pair(GEMM_pairs)
def test_column_matrix_column_vector(self, dtype, blasfunc):
npA = numpy.array([
[1, 2, 3],
[4, 5, 6]], dtype=dtype) # column-major
# 1 4
# 2 5
# 3 6
npB = numpy.array([
[10, 11]], dtype=dtype) # column-major
# 10
# 11
transa = 1 # A is transposed in c-style(row-major)
transb = 1 # B is transposed in c-style(row-major)
m = npA.shape[1] # op(A) rows = (A in column-major) rows = C rows
n = npB.shape[0] # op(B) cols = (B in column-major) cols = C cols
k = npA.shape[0] # op(A) cols = (A in column-major) cols = op(B) rows
clpA = clpy.ndarray(npA.shape, dtype=dtype)
clpA.set(npA)
clpB = clpy.ndarray(npB.shape, dtype=dtype)
clpB.set(npB)
clpC = clpy.ndarray((n, m), dtype=dtype) # column-major
clpA, transa, lda = core._mat_to_cublas_contiguous(
clpA, transa) # as cublas-style
clpB, transb, ldb = core._mat_to_cublas_contiguous(
clpB, transb) # as cublas-style
ldc = clpC.shape[1]
blasfunc('C', transa, transb, m, n, k,
1.0, clpA, lda,
clpB, ldb,
0.0, clpC, ldc
)
actualC = clpC.get().T # as row-major
        expectedC = numpy.dot(npA.T, npB.T)  # row-major calculation
self.assertTrue(numpy.allclose(expectedC, actualC))
@for_each_dtype_and_blasfunc_pair(GEMM_pairs)
def test_column_vector_column_matrix(self, dtype, blasfunc):
npA = numpy.array([
[1],
[4]], dtype=dtype) # column-major
# 1 4
npB = numpy.array([
[10, 11],
[13, 14],
[16, 17]], dtype=dtype) # column-major
# 10 13 16
# 11 14 17
transa = 1 # A is transposed in c-style(row-major)
transb = 1 # B is transposed in c-style(row-major)
m = npA.shape[1] # op(A) rows = (A in column-major) rows = C rows
n = npB.shape[0] # op(B) cols = (B in column-major) cols = C cols
k = npA.shape[0] # op(A) cols = (A in column-major) cols = op(B) rows
clpA = clpy.ndarray(npA.shape, dtype=dtype)
clpA.set(npA)
clpB = clpy.ndarray(npB.shape, dtype=dtype)
clpB.set(npB)
clpC = clpy.ndarray((n, m), dtype=dtype) # column-major
clpA, transa, lda = core._mat_to_cublas_contiguous(
clpA, transa) # as cublas-style
clpB, transb, ldb = core._mat_to_cublas_contiguous(
clpB, transb) # as cublas-style
ldc = clpC.shape[1]
blasfunc('C', transa, transb, m, n, k,
1.0, clpA, lda,
clpB, ldb,
0.0, clpC, ldc
)
actualC = clpC.get().T # as row-major
        expectedC = numpy.dot(npA.T, npB.T)  # row-major calculation
self.assertTrue(numpy.allclose(expectedC, actualC))
@for_each_dtype_and_blasfunc_pair(GEMM_pairs)
def test_row_matrix_column_matrix(self, dtype, blasfunc):
npA = numpy.array([
[1, 2],
[4, 5]], dtype=dtype) # row-major
# 1 2
# 4 5
npB = numpy.array([
[10, 11],
[13, 14],
[16, 17]], dtype=dtype) # column-major
# 10 13 16
# 11 14 17
transa = 0 # A is not transposed in c-style(row-major)
transb = 1 # B is transposed in c-style(row-major)
m = npA.shape[0] # op(A) rows = (A in row-major) rows = C rows
n = npB.shape[0] # op(B) cols = (B in column-major) cols = C cols
k = npA.shape[1] # op(A) cols = (A in row-major) cols = op(B) rows
clpA = clpy.ndarray(npA.shape, dtype=dtype)
clpA.set(npA)
clpB = clpy.ndarray(npB.shape, dtype=dtype)
clpB.set(npB)
clpC = clpy.ndarray((n, m), dtype=dtype) # column-major
clpA, transa, lda = core._mat_to_cublas_contiguous(
clpA, transa) # as cublas-style
clpB, transb, ldb = core._mat_to_cublas_contiguous(
clpB, transb) # as cublas-style
ldc = clpC.shape[1]
blasfunc('C', transa, transb, m, n, k,
1.0, clpA, lda,
clpB, ldb,
0.0, clpC, ldc
)
actualC = clpC.get().T # as row-major
        expectedC = numpy.dot(npA, npB.T)  # row-major calculation
self.assertTrue(numpy.allclose(expectedC, actualC))
@for_each_dtype_and_blasfunc_pair(GEMM_pairs)
def test_row_matrix_column_vector(self, dtype, blasfunc):
npA = numpy.array([
[1, 2],
[4, 5]], dtype=dtype) # row-major
# 1 2
# 4 5
npB = numpy.array([
[10, 11]], dtype=dtype) # column-major
# 10
# 11
transa = 0 # A is not transposed in c-style(row-major)
transb = 1 # B is transposed in c-style(row-major)
m = npA.shape[0] # op(A) rows = (A in row-major) rows = C rows
n = npB.shape[0] # op(B) cols = (B in column-major) cols = C cols
k = npA.shape[1] # op(A) cols = (A in row-major) cols = op(B) rows
clpA = clpy.ndarray(npA.shape, dtype=dtype)
clpA.set(npA)
clpB = clpy.ndarray(npB.shape, dtype=dtype)
clpB.set(npB)
clpC = clpy.ndarray((n, m), dtype=dtype) # column-major
clpA, transa, lda = core._mat_to_cublas_contiguous(
clpA, transa) # as cublas-style
clpB, transb, ldb = core._mat_to_cublas_contiguous(
clpB, transb) # as cublas-style
ldc = clpC.shape[1]
blasfunc('C', transa, transb, m, n, k,
1.0, clpA, lda,
clpB, ldb,
0.0, clpC, ldc
)
actualC = clpC.get().T # as row-major
expectedC = numpy.dot(npA, npB.T) # row-major calculation
self.assertTrue(numpy.allclose(expectedC, actualC))
@for_each_dtype_and_blasfunc_pair(GEMM_pairs)
def test_row_vector_column_matrix(self, dtype, blasfunc):
npA = numpy.array([
[1, 2]], dtype=dtype) # row-major
# 1 2
npB = numpy.array([
[10, 11],
[13, 14],
[16, 17]], dtype=dtype) # column-major
# 10 13 16
# 11 14 17
transa = 0 # A is not transposed in c-style(row-major)
transb = 1 # B is transposed in c-style(row-major)
m = npA.shape[0] # op(A) rows = (A in row-major) rows = C rows
n = npB.shape[0] # op(B) cols = (B in column-major) cols = C cols
k = npA.shape[1] # op(A) cols = (A in row-major) cols = op(B) rows
clpA = clpy.ndarray(npA.shape, dtype=dtype)
clpA.set(npA)
clpB = clpy.ndarray(npB.shape, dtype=dtype)
clpB.set(npB)
clpC = clpy.ndarray((n, m), dtype=dtype) # column-major
clpA, transa, lda = core._mat_to_cublas_contiguous(
clpA, transa) # as cublas-style
clpB, transb, ldb = core._mat_to_cublas_contiguous(
clpB, transb) # as cublas-style
ldc = clpC.shape[1]
blasfunc('C', transa, transb, m, n, k,
1.0, clpA, lda,
clpB, ldb,
0.0, clpC, ldc
)
actualC = clpC.get().T # as row-major
expectedC = numpy.dot(npA, npB.T) # row-major calculation
self.assertTrue(numpy.allclose(expectedC, actualC))
@for_each_dtype_and_blasfunc_pair(GEMM_pairs)
def test_column_matrix_row_matrix(self, dtype, blasfunc):
npA = numpy.array([
[1, 2, 3],
[4, 5, 6]], dtype=dtype) # column-major
# 1 4
# 2 5
# 3 6
npB = numpy.array([
[10, 11],
[13, 14]], dtype=dtype) # row-major
# 10 11
# 13 14
transa = 1 # A is transposed in c-style(row-major)
transb = 0 # B is not transposed in c-style(row-major)
m = npA.shape[1] # op(A) rows = (A in column-major) rows = C rows
n = npB.shape[1] # op(B) cols = (B in row-major) cols = C cols
k = npA.shape[0] # op(A) cols = (A in column-major) cols = op(B) rows
clpA = clpy.ndarray(npA.shape, dtype=dtype)
clpA.set(npA)
clpB = clpy.ndarray(npB.shape, dtype=dtype)
clpB.set(npB)
clpC = clpy.ndarray((n, m), dtype=dtype) # column-major
clpA, transa, lda = core._mat_to_cublas_contiguous(
clpA, transa) # as cublas-style
clpB, transb, ldb = core._mat_to_cublas_contiguous(
clpB, transb) # as cublas-style
ldc = clpC.shape[1]
blasfunc('C', transa, transb, m, n, k,
1.0, clpA, lda,
clpB, ldb,
0.0, clpC, ldc
)
actualC = clpC.get().T # as row-major
expectedC = numpy.dot(npA.T, npB) # row-major calculation
self.assertTrue(numpy.allclose(expectedC, actualC))
@for_each_dtype_and_blasfunc_pair(GEMM_pairs)
def test_column_matrix_row_vector(self, dtype, blasfunc):
npA = numpy.array([
[1, 2, 3],
[4, 5, 6]], dtype=dtype) # column-major
# 1 4
# 2 5
# 3 6
npB = numpy.array([
[10],
[13]], dtype=dtype) # row-major
# 10
# 13
transa = 1 # A is transposed in c-style(row-major)
transb = 0 # B is not transposed in c-style(row-major)
m = npA.shape[1] # op(A) rows = (A in column-major) rows = C rows
n = npB.shape[1] # op(B) cols = (B in row-major) cols = C cols
k = npA.shape[0] # op(A) cols = (A in column-major) cols = op(B) rows
clpA = clpy.ndarray(npA.shape, dtype=dtype)
clpA.set(npA)
clpB = clpy.ndarray(npB.shape, dtype=dtype)
clpB.set(npB)
clpC = clpy.ndarray((n, m), dtype=dtype) # column-major
clpA, transa, lda = core._mat_to_cublas_contiguous(
clpA, transa) # as cublas-style
clpB, transb, ldb = core._mat_to_cublas_contiguous(
clpB, transb) # as cublas-style
ldc = clpC.shape[1]
blasfunc('C', transa, transb, m, n, k,
1.0, clpA, lda,
clpB, ldb,
0.0, clpC, ldc
)
actualC = clpC.get().T # as row-major
expectedC = numpy.dot(npA.T, npB) # row-major calculation
self.assertTrue(numpy.allclose(expectedC, actualC))
@for_each_dtype_and_blasfunc_pair(GEMM_pairs)
def test_column_vector_row_matrix(self, dtype, blasfunc):
npA = numpy.array([
[1],
[2],
[3]], dtype=dtype) # column-major
# 1 2 3
npB = numpy.array([
[10, 11],
[13, 14],
[16, 17]], dtype=dtype) # row-major
# 10 11
# 13 14
# 16 17
transa = 1 # A is transposed in c-style(row-major)
transb = 0 # B is not transposed in c-style(row-major)
m = npA.shape[1] # op(A) rows = (A in column-major) rows = C rows
n = npB.shape[1] # op(B) cols = (B in row-major) cols = C cols
k = npA.shape[0] # op(A) cols = (A in column-major) cols = op(B) rows
clpA = clpy.ndarray(npA.shape, dtype=dtype)
clpA.set(npA)
clpB = clpy.ndarray(npB.shape, dtype=dtype)
clpB.set(npB)
clpC = clpy.ndarray((n, m), dtype=dtype) # column-major
clpA, transa, lda = core._mat_to_cublas_contiguous(
clpA, transa) # as cublas-style
clpB, transb, ldb = core._mat_to_cublas_contiguous(
clpB, transb) # as cublas-style
ldc = clpC.shape[1]
blasfunc('C', transa, transb, m, n, k,
1.0, clpA, lda,
clpB, ldb,
0.0, clpC, ldc
)
actualC = clpC.get().T # as row-major
expectedC = numpy.dot(npA.T, npB) # row-major calculation
self.assertTrue(numpy.allclose(expectedC, actualC))
@for_each_dtype_and_blasfunc_pair(GEMM_pairs)
def test_invalid_transa(self, dtype, blasfunc):
npA = numpy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=dtype)
npB = numpy.array([[10, 11, 12], [13, 14, 15], [
16, 17, 18]], dtype=dtype)
clpA = clpy.ndarray(npA.shape, dtype=dtype)
clpA.set(npA)
clpB = clpy.ndarray(npB.shape, dtype=dtype)
clpB.set(npB)
expectedC = numpy.dot(npA, npB).T
clpC = clpy.ndarray(expectedC.shape, dtype=dtype)
m = npA.shape[0]
n = npB.shape[1]
k = npA.shape[1]
with self.assertRaises(ValueError):
blasfunc('C', transa='a', transb='t',
m=m, n=n, k=k,
alpha=1.0, A=clpA, lda=k,
B=clpB, ldb=n,
beta=0.0,
C=clpC, ldc=m
)
@for_each_dtype_and_blasfunc_pair(GEMM_pairs)
def test_invalid_transb(self, dtype, blasfunc):
npA = numpy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=dtype)
npB = numpy.array([[10, 11, 12], [13, 14, 15], [
16, 17, 18]], dtype=dtype)
clpA = clpy.ndarray(npA.shape, dtype=dtype)
clpA.set(npA)
clpB = clpy.ndarray(npB.shape, dtype=dtype)
clpB.set(npB)
expectedC = numpy.dot(npA, npB).T
clpC = clpy.ndarray(expectedC.shape, dtype=dtype)
m = npA.shape[0]
n = npB.shape[1]
k = npA.shape[1]
with self.assertRaises(ValueError):
blasfunc('C', transa='n', transb='a',
m=m, n=n, k=k,
alpha=1.0, A=clpA, lda=k,
B=clpB, ldb=n,
beta=0.0,
C=clpC, ldc=m
)
@for_each_dtype_and_blasfunc_pair(GEMM_pairs)
def test_alpha_matrix_matrix(self, dtype, blasfunc):
npA = numpy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=dtype) # row-major
npB = numpy.array([[10, 11, 12], [13, 14, 15], [
16, 17, 18]], dtype=dtype) # row-major
transa = 0 # A is not transposed in c-style(row-major)
transb = 0 # B is not transposed in c-style(row-major)
alpha = 2.0
beta = 0.0
m = npA.shape[0]
n = npB.shape[1]
k = npA.shape[1]
clpA = clpy.ndarray(npA.shape, dtype=dtype) # col major in clpy
clpA.set(npA)
clpB = clpy.ndarray(npB.shape, dtype=dtype) # col major in clpy
clpB.set(npB)
expectedC = numpy.dot(npA, npB) * alpha
clpC = clpy.ndarray(expectedC.shape, dtype=dtype)
clpA, transa, lda = core._mat_to_cublas_contiguous(
clpA, transa) # as cublas-style
clpB, transb, ldb = core._mat_to_cublas_contiguous(
clpB, transb) # as cublas-style
ldc = clpC.shape[1]
# alpha * (A^T x B^T) in col-major = alpha * A x B in row-major
blasfunc('C', transa, transb,
m, n, k, alpha,
clpA, lda,
clpB, ldb,
beta, clpC, ldc
)
actualC = clpC.get().T # as row-major
self.assertTrue(numpy.allclose(expectedC, actualC))
@for_each_dtype_and_blasfunc_pair(GEMM_pairs)
def test_beta_matrix_matrix(self, dtype, blasfunc):
npA = numpy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=dtype) # row-major
npB = numpy.array([[10, 11, 12], [13, 14, 15], [
16, 17, 18]], dtype=dtype) # row-major
npC = numpy.array([[19, 20, 21], [22, 23, 24], [
25, 26, 27]], dtype=dtype) # row-major
transa = 0 # A is not transposed in c-style(row-major)
transb = 0 # B is not transposed in c-style(row-major)
alpha = 1.0
beta = 2.0
m = npA.shape[0]
n = npB.shape[1]
k = npA.shape[1]
clpA = clpy.ndarray(npA.shape, dtype=dtype) # col-major in clpy
clpA.set(npA)
clpB = clpy.ndarray(npB.shape, dtype=dtype) # col-major in clpy
clpB.set(npB)
clpC = clpy.ndarray(npC.shape, dtype=dtype) # col-major in clpy
clpC.set(npC.T) # transpose C
clpA, transa, lda = core._mat_to_cublas_contiguous(
clpA, transa) # as cublas-style
clpB, transb, ldb = core._mat_to_cublas_contiguous(
clpB, transb) # as cublas-style
ldc = clpC.shape[1]
# AxB + beta*C
expectedC = numpy.add(numpy.dot(npA, npB), beta * npC)
# (A^T x B^T) + C^T in col-major = A x B + C in row-major
blasfunc('C', transa, transb,
m, n, k, alpha,
clpA, lda,
clpB, ldb,
beta, clpC, ldc
)
actualC = clpC.get().T # as row-major
self.assertTrue(numpy.allclose(expectedC, actualC))
@for_each_dtype_and_blasfunc_pair(GEMM_pairs)
def test_beta_0_matrix_matrix(self, dtype, blasfunc):
npA = numpy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=dtype) # row-major
npB = numpy.array([[10, 11, 12], [13, 14, 15], [
16, 17, 18]], dtype=dtype) # row-major
transa = 0 # A is not transposed in c-style(row-major)
transb = 0 # B is not transposed in c-style(row-major)
alpha = 1.0
beta = 0.0
m = npA.shape[0]
n = npB.shape[1]
k = npA.shape[1]
clpA = clpy.ndarray(npA.shape, dtype=dtype) # col major in clpy
clpA.set(npA)
clpB = clpy.ndarray(npB.shape, dtype=dtype) # col major in clpy
clpB.set(npB)
expectedC = numpy.dot(npA, npB) * alpha
clpC = clpy.ndarray(expectedC.shape, dtype=dtype)
clpC.fill(numpy.nan)
clpA, transa, lda = core._mat_to_cublas_contiguous(
clpA, transa) # as cublas-style
clpB, transb, ldb = core._mat_to_cublas_contiguous(
clpB, transb) # as cublas-style
ldc = clpC.shape[1]
# alpha * (A^T x B^T) in col-major = alpha * A x B in row-major
blasfunc('C', transa, transb,
m, n, k, alpha,
clpA, lda,
clpB, ldb,
beta, clpC, ldc
)
actualC = clpC.get().T # as row-major
self.assertTrue(numpy.allclose(expectedC, actualC))
@for_each_dtype_and_blasfunc_pair(GEMM_pairs)
def test_chunk_gemm_A(self, dtype, blasfunc):
# create chunk and free to prepare chunk in pool
pool = clpy.backend.memory.SingleDeviceMemoryPool()
clpy.backend.memory.set_allocator(pool.malloc)
pooled_chunk_size = pool._allocation_unit_size * 2
tmp = pool.malloc(pooled_chunk_size)
pool.free(tmp.buf, pooled_chunk_size, 0)
size = 3
wrong_value = numpy.nan
npA = numpy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=dtype)
npB = numpy.array([[10, 11, 12], [13, 14, 15],
[16, 17, 18]], dtype=dtype)
transa = 0 # A is not transposed in c-style(row-major)
transb = 0 # B is not transposed in c-style(row-major)
alpha = 1.0
beta = 0.0
m = npA.shape[0]
n = npB.shape[1]
k = npA.shape[1]
dummy = clpy.empty(size, dtype)
dummy.fill(wrong_value)
# clpA is chunk with offset != 0
clpA = clpy.empty(npA.shape, dtype=dtype)
self.assertTrue(clpA.data.mem.offset != 0)
clpA.set(npA)
# clpB is chunk with offset == 0
clpB = clpy.empty(npB.shape, dtype=dtype)
self.assertTrue(clpB.data.mem.offset == 0)
clpB.set(npB)
expectedC = numpy.dot(npA, npB)
clpC = clpy.ndarray(expectedC.shape, dtype=dtype)
clpA, transa, lda = core._mat_to_cublas_contiguous(
clpA, transa) # as cublas-style
clpB, transb, ldb = core._mat_to_cublas_contiguous(
clpB, transb) # as cublas-style
ldc = clpC.shape[1]
blasfunc('C', transa, transb,
m, n, k, alpha,
clpA, lda,
clpB, ldb,
beta, clpC, ldc
)
actualC = clpC.get().T
clpy.backend.memory.set_allocator()
self.assertTrue(numpy.allclose(expectedC, actualC))
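# Note on the chunk tests (descriptive, added for clarity): pre-freeing a
# pooled chunk and then allocating a small `dummy` forces the next array to
# land inside that chunk at a nonzero byte offset; the assertions on
# `.data.mem.offset` pin that down, and the GEMM result then verifies the
# BLAS call honors the offset. `_allocation_unit_size` is the pool's
# allocation granularity.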
@for_each_dtype_and_blasfunc_pair(GEMM_pairs)
def test_chunk_gemm_B(self, dtype, blasfunc):
# create chunk and free to prepare chunk in pool
pool = clpy.backend.memory.SingleDeviceMemoryPool()
clpy.backend.memory.set_allocator(pool.malloc)
pooled_chunk_size = pool._allocation_unit_size * 2
tmp = pool.malloc(pooled_chunk_size)
pool.free(tmp.buf, pooled_chunk_size, 0)
size = 3
wrong_value = numpy.nan
npA = numpy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=dtype)
npB = numpy.array([[10, 11, 12], [13, 14, 15],
[16, 17, 18]], dtype=dtype)
transa = 0 # A is not transposed in c-style(row-major)
transb = 0 # B is not transposed in c-style(row-major)
alpha = 1.0
beta = 0.0
m = npA.shape[0]
n = npB.shape[1]
k = npA.shape[1]
dummy = clpy.empty(size, dtype)
dummy.fill(wrong_value)
# clpB is chunk with offset != 0
clpB = clpy.empty(npB.shape, dtype=dtype)
self.assertTrue(clpB.data.mem.offset != 0)
clpB.set(npB)
# clpA is chunk with offset == 0
clpA = clpy.empty(npA.shape, dtype=dtype)
self.assertTrue(clpA.data.mem.offset == 0)
clpA.set(npA)
expectedC = numpy.dot(npA, npB)
clpC = clpy.ndarray(expectedC.shape, dtype=dtype)
clpA, transa, lda = core._mat_to_cublas_contiguous(
clpA, transa) # as cublas-style
clpB, transb, ldb = core._mat_to_cublas_contiguous(
clpB, transb) # as cublas-style
ldc = clpC.shape[1]
blasfunc('C', transa, transb,
m, n, k, alpha,
clpA, lda,
clpB, ldb,
beta, clpC, ldc
)
actualC = clpC.get().T
clpy.backend.memory.set_allocator()
self.assertTrue(numpy.allclose(expectedC, actualC))
@for_each_dtype_and_blasfunc_pair(GEMM_pairs)
def test_chunk_gemm_C(self, dtype, blasfunc):
# create chunk and free to prepare chunk in pool
pool = clpy.backend.memory.SingleDeviceMemoryPool()
clpy.backend.memory.set_allocator(pool.malloc)
pooled_chunk_size = pool._allocation_unit_size * 2
tmp = pool.malloc(pooled_chunk_size)
pool.free(tmp.buf, pooled_chunk_size, 0)
size = 3
wrong_value = numpy.nan
npA = numpy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=dtype)
npB = numpy.array([[10, 11, 12], [13, 14, 15],
[16, 17, 18]], dtype=dtype)
npC = numpy.array([[19, 20, 21], [22, 23, 24],
[25, 26, 27]], dtype=dtype)
transa = 0 # A is not transposed in c-style(row-major)
transb = 0 # B is not transposed in c-style(row-major)
alpha = 1.0
beta = 1.0
m = npA.shape[0]
n = npB.shape[1]
k = npA.shape[1]
expectedC = numpy.add(numpy.dot(npA, npB), beta * npC)
dummy = clpy.empty(size, dtype)
dummy.fill(wrong_value)
# clpC is chunk with offset != 0
clpC = clpy.ndarray(expectedC.shape, dtype=dtype)
self.assertTrue(clpC.data.mem.offset != 0)
clpC.set(npC.T)
# clpA is chunk with offset == 0
clpA = clpy.empty(npA.shape, dtype=dtype)
self.assertTrue(clpA.data.mem.offset == 0)
clpA.set(npA)
# clpB is chunk with offset == 0
clpB = clpy.empty(npB.shape, dtype=dtype)
self.assertTrue(clpB.data.mem.offset == 0)
clpB.set(npB)
clpA, transa, lda = core._mat_to_cublas_contiguous(
clpA, transa) # as cublas-style
clpB, transb, ldb = core._mat_to_cublas_contiguous(
clpB, transb) # as cublas-style
ldc = clpC.shape[1]
blasfunc('C', transa, transb,
m, n, k, alpha,
clpA, lda,
clpB, ldb,
beta, clpC, ldc
)
actualC = clpC.get().T
clpy.backend.memory.set_allocator()
self.assertTrue(numpy.allclose(expectedC, actualC))
@for_each_dtype_and_blasfunc_pair(GEMM_pairs)
def test_strides_transpose_A(self, dtype, blasfunc):
npA = numpy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=dtype) # row-major
npB = numpy.array([[10, 11, 12], [13, 14, 15], [
16, 17, 18]], dtype=dtype) # row-major
npC = numpy.array([[19, 20, 21], [22, 23, 24], [
25, 26, 27]], dtype=dtype) # row-major
alpha = 1.1
beta = 2.1
m = npA.shape[1]
n = npB.shape[1]
k = npA.shape[0]
clpA = clpy.ndarray(npA.shape, dtype=dtype) # col-major in clpy
clpA.set(npA)
clpB = clpy.ndarray(npB.shape, dtype=dtype) # col-major in clpy
clpB.set(npB)
# change A.strides
clpA = clpA.transpose(1, 0)
npA = npA.transpose(1, 0)
clpC = clpy.ndarray(npC.shape, dtype=dtype) # col-major in clpy
clpC.set(npC.T) # transpose C
transa = 0 # A is not transposed in c-style(row-major)
transb = 0 # B is not transposed in c-style(row-major)
clpA, transa, lda = core._mat_to_cublas_contiguous(
clpA, transa) # as cublas-style
clpB, transb, ldb = core._mat_to_cublas_contiguous(
clpB, transb) # as cublas-style
ldc = clpC.shape[1]
# alpha*(A x B) + beta*C
expectedC = numpy.add(alpha * numpy.dot(npA, npB), beta * npC)
# (A^T x B^T) + C^T in col-major = A x B + C in row-major
blasfunc('C', transa, transb,
m, n, k, alpha,
clpA, lda,
clpB, ldb,
beta, clpC, ldc
)
actualC = clpC.get().T # as row-major
self.assertTrue(numpy.allclose(expectedC, actualC))
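# Leading-dimension sketch (NumPy only): for a C-contiguous buffer handed to
# a column-major BLAS as its transpose, the leading dimension is the row
# stride in elements, i.e. shape[1] of the row-major view -- which is why
# ldc is taken from clpC.shape[1] throughout:
#   M = numpy.arange(6, dtype=numpy.float32).reshape(2, 3)
#   assert M.strides[0] // M.itemsize == M.shape[1]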
TRSM_pairs = [
('float32', clblast.strsm)
]
class TestBlas3TRSM(unittest.TestCase):
"""test class of CLBlast BLAS-3 TRSM functions"""
@for_each_dtype_and_blasfunc_pair(TRSM_pairs)
def test_strsm_works(self, dtype, blasfunc):
npA = numpy.array([
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]], dtype=dtype)
npB = numpy.array([
[10, 11, 12],
[13, 14, 15],
[16, 17, 18]], dtype=dtype)
m = npA.shape[0]
n = npA.shape[1]
clpA = clpy.ndarray(npA.shape, dtype=dtype)
clpA.set(npA)
clpB = clpy.ndarray(npB.shape, dtype=dtype)
clpB.set(npB)
alpha = 1.0
layout = 'R'
side = 'L'
triangle = 'U'
transa = 'N'
diagonal = 'N'
blasfunc(layout, side, triangle, transa, diagonal,
m, n, alpha, clpA, n, clpB, n)
actualB = clpB.get()
expectedB = numpy.dot(numpy.linalg.inv(numpy.triu(npA)), npB)
self.assertTrue( | numpy.allclose(expectedB, actualB) | numpy.allclose |
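# Cross-check sketch for the TRSM expectation above: solving the upper
# triangular system with SciPy's dedicated solver (not imported by the
# original test) matches the explicit-inverse formulation.
import numpy
from scipy.linalg import solve_triangular
_A = numpy.triu(numpy.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]))
_B = numpy.array([[10., 11., 12.], [13., 14., 15.], [16., 17., 18.]])
assert numpy.allclose(solve_triangular(_A, _B, lower=False),
numpy.dot(numpy.linalg.inv(_A), _B))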
__all__ = [
"IMCCE",
"leaky_relu",
"load_config",
"log",
"make_triplet",
"MPC",
"reticle",
"plot_stack",
"preprocess_stack",
"time_stamp",
"query_horizons",
"radec_str2deg",
"relu",
]
from astropy.coordinates import SkyCoord
from astropy.io import fits
import astropy.units as u
from astropy.visualization import (
# AsymmetricPercentileInterval,
ZScaleInterval,
LinearStretch,
LogStretch,
ImageNormalize,
)
from astroquery.imcce import Skybot
from bs4 import BeautifulSoup
from bson.json_util import dumps, loads
from copy import deepcopy
from cycler import cycler
import datetime
import gzip
import io
# from matplotlib.colors import LogNorm
from matplotlib.path import Path
import matplotlib.pyplot as plt
from multiprocessing.pool import ThreadPool
import numpy as np
import re
import requests
from requests_html import HTMLSession
import traceback
from tqdm.auto import tqdm
import yaml
s = requests.Session()
def load_config(config_file="./config.yaml"):
"""
Load config and secrets
"""
with open(config_file) as cyaml:
config = yaml.load(cyaml, Loader=yaml.FullLoader)
return config
def time_stamp():
"""
:return: UTC time -> string
"""
return datetime.datetime.utcnow().strftime("%Y%m%d_%H:%M:%S")
def log(message):
print(f"{time_stamp()}: {message}")
def relu(a):
a[a < 0] = 0
return a
def leaky_relu(a, const=1e-7):
a[a <= 0] = const
return a
def radec_str2deg(_ra_str, _dec_str):
"""
:param _ra_str: 'H:M:S'
:param _dec_str: 'D:M:S'
:return: ra, dec in deg
"""
# convert to deg:
_ra = list(map(float, _ra_str.split(":")))
_ra = (_ra[0] + _ra[1] / 60.0 + _ra[2] / 3600.0) * np.pi / 12.0
_dec = list(map(float, _dec_str.split(":")))
_sign = -1 if _dec_str.strip()[0] == "-" else 1
_dec = (
_sign
* (abs(_dec[0]) + abs(_dec[1]) / 60.0 + abs(_dec[2]) / 3600.0)
* np.pi
/ 180.0
)
return _ra * 180.0 / np.pi, _dec * 180.0 / np.pi
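# Usage sketch for radec_str2deg (hand-checkable values: 12h of right
# ascension is 180 deg, and "-30:00:00" is -30 deg):
_ra_deg, _dec_deg = radec_str2deg("12:00:00", "-30:00:00")
assert abs(_ra_deg - 180.0) < 1e-9 and abs(_dec_deg + 30.0) < 1e-9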
def query_horizons(record_id: int, position: str = "I41@399", jd=None):
try:
url = (
"http://ssd.jpl.nasa.gov/horizons_batch.cgi?batch=l"
f"&COMMAND='{record_id}'"
f"&CENTER='{position}'"
"&MAKE_EPHEM='YES'"
"&TABLE_TYPE='OBSERVER'"
f"&START_TIME='JD {jd}'"
f"&STOP_TIME='JD {jd + 1e-6}'"
"&STEP_SIZE='1 m'"
"&CAL_FORMAT='CAL'"
"&TIME_DIGITS='MINUTES'"
"&ANG_FORMAT='HMS'"
"&OUT_UNITS='KM-S'"
"&RANGE_UNITS='AU'"
"&APPARENT='AIRLESS'"
"&SUPPRESS_RANGE_RATE='NO'"
"&SKIP_DAYLT='NO'"
"&EXTRA_PREC='NO'"
"&R_T_S_ONLY='NO'"
"&REF_SYSTEM='J2000'"
"&CSV_FORMAT='NO'"
"&OBJ_DATA='NO'"
"&QUANTITIES='1,2,3,4,9'"
)
r = s.get(url)
if r.status_code == requests.codes.ok:
resp = r.text.split("\n")
i_start = [ir for ir, rr in enumerate(resp) if rr == "$$SOE"][0]
i_stop = [ir for ir, rr in enumerate(resp) if rr == "$$EOE"][0]
record = resp[i_start + 1 : i_stop]
# print(record)
tmp = record[0].split()
raw_date = "_".join(tmp[:2]).strip()
if "." in raw_date:
dt = datetime.datetime.strptime(raw_date, "%Y-%b-%d_%H:%M:%S.%f")
elif raw_date.count(":") == 2:
dt = datetime.datetime.strptime(raw_date, "%Y-%b-%d_%H:%M:%S")
elif raw_date.count(":") == 1:
dt = datetime.datetime.strptime(raw_date, "%Y-%b-%d_%H:%M")
else:
dt = None
if len(tmp) == 21:
data = {
"t_utc": dt,
"ra_str": ":".join(tmp[3:6]),
"dec_str": ":".join(tmp[6:9]),
"ra_apparent_str": ":".join(tmp[9:12]),
"dec_apparent_str": ":".join(tmp[12:15]),
"dRA*cosD": float(tmp[15]),
"d(DEC)/dt": float(tmp[16]),
"az": float(tmp[17]),
"el": float(tmp[18]),
"T_mag": float(tmp[19]) if tmp[19] != "n.a." else None,
"N_mag": float(tmp[20]) if tmp[20] != "n.a." else None,
}
elif len(tmp) == 20:
data = {
"t_utc": dt,
"ra_str": ":".join(tmp[2:5]),
"dec_str": ":".join(tmp[5:8]),
"ra_apparent_str": ":".join(tmp[8:11]),
"dec_apparent_str": ":".join(tmp[11:14]),
"dRA*cosD": float(tmp[14]),
"d(DEC)/dt": float(tmp[15]),
"az": float(tmp[16]),
"el": float(tmp[17]),
"T_mag": float(tmp[18]) if tmp[18] != "n.a." else None,
"N_mag": float(tmp[19]) if tmp[19] != "n.a." else None,
}
else:
# unexpected ephemeris line format; bail out rather than reference an unbound `data`
return None
ra, dec = radec_str2deg(data["ra_str"], data["dec_str"])
data["ra_jpl"], data["dec_jpl"] = ra, dec
return data
else:
return None
except Exception as e:
print(e)
traceback.print_exc()
return None
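# Usage sketch (kept commented out: it performs a live HTTP request to JPL
# Horizons). "I41@399" should be the Horizons site code for Palomar/ZTF, and
# the record id below is purely hypothetical:
#   eph = query_horizons(record_id=90000033, jd=2458800.5)
#   if eph is not None:
#       print(eph["t_utc"], eph["ra_jpl"], eph["dec_jpl"])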
def make_triplet(alert, normalize: bool = False):
"""
Feed in alert packet
"""
cutout_dict = dict()
for cutout in ("science", "template", "difference"):
cutout_data = loads(
dumps([alert[f"cutout{cutout.capitalize()}"]["stampData"]])
)[0]
# unzip
with gzip.open(io.BytesIO(cutout_data), "rb") as f:
with fits.open(io.BytesIO(f.read())) as hdu:
data = hdu[0].data
# replace nans with zeros
cutout_dict[cutout] = np.nan_to_num(data)
# normalize
if normalize:
cutout_dict[cutout] /= np.linalg.norm(cutout_dict[cutout])
# pad to 63x63 if smaller
shape = cutout_dict[cutout].shape
if shape != (63, 63):
cutout_dict[cutout] = np.pad(
cutout_dict[cutout],
[(0, 63 - shape[0]), (0, 63 - shape[1])],
mode="constant",
constant_values=1e-9,
)
triplet = np.zeros((63, 63, 3))
triplet[:, :, 0] = cutout_dict["science"]
triplet[:, :, 1] = cutout_dict["template"]
triplet[:, :, 2] = cutout_dict["difference"]
return triplet
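# Padding sketch mirroring the 63x63 normalization above (NumPy only): a
# smaller cutout is padded on its bottom and right edges with a tiny constant.
_small = np.ones((60, 62))
_padded = np.pad(_small, [(0, 63 - 60), (0, 63 - 62)],
mode="constant", constant_values=1e-9)
assert _padded.shape == (63, 63)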
def reticle(inner=0.5, outer=1.0, angle=0.0, which="lrtb"):
"""Create a reticle or crosshairs marker.
Author: <NAME>
Parameters
----------
inner : float
Distance from the origin to the inside of the crosshairs.
outer : float
Distance from the origin to the outside of the crosshairs.
angle : float
Rotation in degrees; 0 for a '+' orientation and 45 for 'x'.
which : str
Tick marks to draw: any subset of 'lrtb' (left, right, top, bottom).
Returns
-------
path : `matplotlib.path.Path`
The new marker path, suitable for passing to Matplotlib functions
(e.g., `plt.plot(..., marker=reticle())`)
Examples
--------
.. plot::
:context: reset
:include-source:
:align: center
from matplotlib import pyplot as plt
from ligo.skymap.plot.marker import reticle
markers = [reticle(inner=0),
reticle(which='lt'),
reticle(which='lt', angle=45)]
fig, ax = plt.subplots(figsize=(6, 2))
ax.set_xlim(-0.5, 2.5)
ax.set_ylim(-0.5, 0.5)
for x, marker in enumerate(markers):
ax.plot(x, 0, markersize=20, markeredgewidth=2, marker=marker)
"""
angle = np.deg2rad(angle)
x = np.cos(angle)
y = np.sin(angle)
rotation = [[x, y], [-y, x]]
vertdict = {"l": [-1, 0], "r": [1, 0], "b": [0, -1], "t": [0, 1]}
verts = [vertdict[direction] for direction in which]
codes = [Path.MOVETO, Path.LINETO] * len(verts)
verts = np.dot(verts, rotation)
verts = np.swapaxes([inner * verts, outer * verts], 0, 1).reshape(-1, 2)
return Path(verts, codes)
def plot_stack(stack, reticles=None, zscale=True, save=False, **kwargs):
"""
:param stack: assuming "channels_last" shape
:param reticles:
:param zscale:
:param save:
:param kwargs:
:return:
"""
w = kwargs.get("w", 8)
h = kwargs.get("h", 2)
dpi = kwargs.get("dpi", 120)
cmap = kwargs.get("cmap", plt.cm.cividis)
origin = kwargs.get("origin", "lower")
titles = kwargs.get("titles", None) # should be of shape (n_i, )
# number of images in the stack to plot
n_i = stack.shape[-1]
plt.close("all")
# cmap: plt.cm.cividis, plt.cm.bone
fig = plt.figure(figsize=(w, h), dpi=dpi)
# number of rows and columns
n_r = kwargs.get("n_r", 1)
n_c = kwargs.get("n_c", n_i)
for i in range(n_i):
ax = fig.add_subplot(n_r, n_c, i + 1)
ax.axis("off")
img = deepcopy(stack[..., i])
# print(img)
# replace dubiously large values (out= keeps the masked entries defined;
# without it, np.greater(..., where=...) leaves them uninitialized)
xl = np.greater(np.abs(img), 1e20, out=np.zeros(img.shape, dtype=bool), where=~np.isnan(img))
if img[xl].any():
img[xl] = np.nan
img[np.abs(img) < 0.1] = np.nan
if np.isnan(img).any():
median = float(np.nanmean(img.flatten())) # fill value for NaNs (nan-mean, despite the name)
img = | np.nan_to_num(img, nan=median) | numpy.nan_to_num |
from __future__ import print_function, division, absolute_import
import functools
import sys
import warnings
# unittest.TestCase.subTest() was only added in Python 3.4
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug.testutils import (array_equal_lists, keypoints_equal, reseed,
runtest_pickleable_uint8_img)
import imgaug.augmenters.arithmetic as arithmetic_lib
import imgaug.augmenters.contrast as contrast_lib
class TestAdd(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Add(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Add(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
# no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
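# Sketch of the to_deterministic() pattern exercised in this class (assumes
# imgaug's public API as imported above): a deterministic copy freezes the
# sampled randomness, so repeated calls agree with each other.
#   aug_det = iaa.Add(value=(1, 10)).to_deterministic()
#   img = np.zeros((2, 2, 1), dtype=np.uint8)
#   assert np.array_equal(aug_det.augment_image(img), aug_det.augment_image(img))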
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [float, int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.Add(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
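# The expectation above is plain saturating uint8 arithmetic (NumPy-only
# sketch): 127 + 200 clips to 255 instead of wrapping around.
#   assert np.clip(127 + 200, 0, 255) == 255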
def test_add_floats(self):
# specific tests with floats
aug = iaa.Add(value=0.75)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
aug = iaa.Add(value=0.45)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
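# The float cases above are consistent with rounding after the addition
# (a sketch of the observed expectation, not of imgaug's internals):
#   1 + 0.75 = 1.75 -> 2, while 1 + 0.45 = 1.45 -> 1
#   assert int(np.round(1.75)) == 2 and int(np.round(1.45)) == 1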
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert 100 - 3 <= | np.average(observed) | numpy.average |
import gym
import torch
import numpy as np
import matplotlib.pyplot as plt
import pickle
from lib.util import to_device
def plot(frame_idx, rewards, fpaths, name):
#plt.figure(figsize=(20,20))
#plt.subplot(111)
#plt.title('frame %s. reward: %s' % (frame_idx, rewards[-1]))
plt.title(name+' (X-axis: iteration Y-axis: Reward(avg))')
plt.plot(rewards)
#plt.show()
plt.savefig(fpaths+name+'.png')
plt.close()
def generate(model, env, env_name, als, device, data_n_steps, paths, fpaths, vv, max_genert_num, zflt, nn='0', expert_reward=None, D=None):
#device = torch.device('cpu')
states, actions, rewards, dones, values = [], [], [], [], []
k = 0
genert_num = 0
xran = []
mean_rwds = []
mean_drwds = []
while genert_num < max_genert_num:
genert_num += 1
#for i in range(data_n_steps + 1):
num_step = 0
num_episode = 0
total_rwd = 0
total_drwd = 0
max_rwd = -1e6
min_rwd = 1e6
states, actions, rewards, dones, values = [], [], [], [], []
while num_step < data_n_steps:
state = env.reset()
state = zflt(state)
rwd = 0.0
drwd = 0.0
for t in range(10000):
k += 1
#num_step += 1
states.append(state)
state_ = torch.tensor(state).unsqueeze(0).to(device)
with torch.no_grad():
#action_probs = model.select_action(state_)[0].cpu().numpy()
action_probs, mean, std = model.select_action(state_)
action_probs = action_probs[0].cpu().numpy()
action_probs = action_probs.astype(np.float64)
next_state, r, done, _ = env.step(action_probs)
#next_state = np.clip(next_state, -5, 5)
next_state = zflt(next_state)
actions.append(action_probs)
rwd += r
if expert_reward is not None:
v = expert_reward(D, state, device, action_probs)
drwd += v
values.append(v)
rewards.append(r)
if done:
dones.append(0)
else:
dones.append(1)
if done:# or num_step >= data_n_steps:
#state = env.reset()
break
state = next_state
num_step += t + 1
num_episode += 1
total_rwd += rwd
max_rwd = max(max_rwd, rwd)
min_rwd = min(min_rwd, rwd)
if expert_reward is not None:
total_drwd += drwd
yield {'states':states, 'actions':actions, 'rewards':rewards, 'dones':dones, 'values':values, 'mean':mean, 'std':std}
xran.append(k)
mean_rwd = total_rwd/num_episode
mean_rwds.append(mean_rwd)
print('ts %d\t genert_num %d\t min_rwd %.2f\t max_rwd %.2f\t mean_rwd %.2f\t' %(k,genert_num, min_rwd, max_rwd, mean_rwd))
#plot(1, trj_rwds, 'halfcheetah-v2-5')
plot(1, mean_rwds, fpaths, env_name+'_'+als+'vv'+vv)
np.save(paths+env_name+'_'+als+vv+'_plot.npy', np.array(mean_rwds))
np.save(paths+env_name+'_'+als+vv+'_ppo_rewards.npy', np.array(rewards))
np.save(paths+env_name+'_'+als+vv+'_ppo_xran.npy', np.array(xran))
def save_expert(args, policy_model, env, env_name, als, device, data_n_steps, paths, vv, max_expert_num, zflt, mm='0'):
num_steps = 0
experts = []
rewards = []
#max_expert_num = 5000
while num_steps < max_expert_num:
state = env.reset()
state = zflt(state)
done = False
reward = []
while not done:
#state = np.clip(state, -5, 5)
state_ = torch.tensor(state).unsqueeze(0).to(device)
#action_probs = agent(state)
with torch.no_grad():
action_probs = policy_model.select_action(state_)[0].cpu().numpy()
next_state, r, done, _ = env.step(action_probs)
next_state = zflt(next_state)
#actions.append(action_probs)
reward.append(r)
experts.append(np.hstack([state, action_probs]))
state = next_state
num_steps += 1
rewards.append(reward)
print('num_steps %d reward %.4f ' % (num_steps, sum(reward)))
experts = np.stack(experts)
rewards = np.array(rewards)
np.save(paths+env_name+'_'+als+vv+'_state_action.npy', experts)
#np.save('./expert_trj/halfcheetah'+'_state_action5.npy', experts)
np.save(paths+env_name+'_'+als+vv+'_exp_rewards.npy', rewards)
#np.save('./expert_trj/halfcheetah'+'_rewards5.npy', rewards)
pickle.dump(zflt, open(paths+env_name+'_expert'+vv+'.p', 'wb'))
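# Reload sketch for the filter pickled above (hypothetical path; `zflt` is
# applied to every raw observation, so the same object has to be reloaded at
# evaluation time to reproduce the training-time transformation):
#   zflt = pickle.load(open(paths + env_name + '_expert' + vv + '.p', 'rb'))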
def generate2(model, env, env_name, als, device, data_n_steps, paths, fpaths, vv, max_genert_num, zflt, critic_model, arg_action, seed, expert_reward=None, D=None, mm = '0'):
#device = torch.device('cpu')
states, actions, rewards, dones, values = [], [], [], [], []
k = 0
genert_num = 0
xran = []
mean_rwds = []
mean_drwds = []
rewards_env = []
while genert_num < max_genert_num:
genert_num += 1
#for i in range(data_n_steps + 1):
num_step = 0
num_episode = 0
total_rwd = 0
total_drwd = 0
max_rwd = -1e6
min_rwd = 1e6
states, actions, rewards, dones, values = [], [], [], [], []
while num_step < data_n_steps:
state = env.reset()
state = zflt(state)
rwd = 0.0
drwd = 0.0
prev_state_ = torch.tensor(state).unsqueeze(0).to(device)
prev_state = state
for t in range(10000):
k += 1
#num_step += 1
states.append(state)
state_ = torch.tensor(state).unsqueeze(0).to(device)
with torch.no_grad():
#action_probs = model.select_action(state_)[0].cpu().numpy()
action_probs, mean, std = model.select_action(state_)
action_probs = action_probs[0].cpu().numpy()
state__ = state_
#if arg_action == '3':
# state__ = torch.cat([prev_state_, state_], -1)
value = critic_model(state__)[0][0].cpu().numpy()
action_probs = action_probs.astype(np.float64)
next_state, r, done, _ = env.step(action_probs)
#next_state = np.clip(next_state, -5, 5)
next_state = zflt(next_state)
actions.append(action_probs)
rwd += r
#if expert_reward is not None:
_state = state
if arg_action == '3':
_state = np.hstack([prev_state, state])
v = expert_reward(D, _state, action_probs, device, arg_action)
drwd += v
values.append(value)
rewards_env.append(r)
rewards.append(v)
if done:
dones.append(0)
else:
dones.append(1)
if done:# or num_step >= data_n_steps:
#state = env.reset()
break
prev_state_ = state_
prev_state = state
state = next_state
num_step += t + 1
num_episode += 1
total_rwd += rwd
max_rwd = max(max_rwd, rwd)
min_rwd = min(min_rwd, rwd)
if expert_reward is not None:
total_drwd += drwd
yield {'states':states, 'actions':actions, 'rewards':rewards, 'dones':dones, 'values':values, 'mean':mean, 'std':std}
xran.append(k)
mean_rwd = total_rwd / num_episode
mean_drwd = total_drwd / num_episode
mean_rwds.append(mean_rwd)
mean_drwds.append(mean_drwd)
print('ts %d\t genert_num %d\t dones %d\t min_rwd %.2f\t max_rwd %.2f\t mean_rwd %.2f\t mean_drwd %.2f\t' %(k, genert_num, sum(dones), min_rwd, max_rwd, mean_rwd, mean_drwd))
#plot(1, trj_rwds, 'halfcheetah-v2-5')
if als == 'gail':
signs = ''
if arg_action == '1':
signs = '_no_action'
elif arg_action == '2':
signs = '_agent_ac'
elif arg_action == '3':
signs = '_agent_st'
plot(1, mean_rwds, fpaths, env_name+'_'+als+'vv'+vv+'mm'+mm+signs+'_seed'+str(seed)+'_env_reward')
plot(0, mean_drwds, fpaths, env_name+'_'+als+'vv'+vv+'mm'+mm+signs+'_seed'+str(seed)+'_define_reward')
np.save(paths+env_name+'_'+als+'vv'+vv+'mm'+mm+signs+'_seed'+str(seed)+'_plot.npy', np.array(mean_rwds))
np.save(paths+env_name+'_'+als+'vv'+vv+'mm'+mm+signs+'_seed'+str(seed)+'_genv_rewards.npy', np.array(rewards_env))
np.save(paths+env_name+'_'+als+'vv'+vv+'mm'+mm+signs+'_seed'+str(seed)+'_xran.npy', np.array(xran))
else:
plot(1, mean_rwds, fpaths, env_name+'_'+als+vv)
np.save(paths+env_name+'_'+als+vv+'_seed'+str(seed)+'_plot.npy', np.array(mean_rwds))
np.save(paths+env_name+'_'+als+vv+'_seed'+str(seed)+'_xran.npy', | np.array(xran) | numpy.array |
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_allclose
from numpy.testing import assert_array_almost_equal
from ruspy.config import TEST_RESOURCES_DIR
from ruspy.estimation.est_cost_params import create_state_matrix
from ruspy.estimation.est_cost_params import derivative_loglike_cost_params
from ruspy.estimation.estimation import estimate
from ruspy.estimation.estimation_transitions import create_transition_matrix
from ruspy.model_code.cost_functions import cubic_costs
from ruspy.model_code.cost_functions import cubic_costs_dev
TEST_FOLDER = TEST_RESOURCES_DIR + "replication_test/"
@pytest.fixture(scope="module")
def inputs():
out = {}
disc_fac = 0.9999
num_states = 90
scale = 1e-8
init_dict = {
"model_specifications": {
"discount_factor": disc_fac,
"number_states": num_states,
"maint_cost_func": "cubic",
"cost_scale": scale,
},
"optimizer": {"approach": "NFXP", "algorithm": "scipy_lbfgsb"},
}
df = pd.read_pickle(TEST_FOLDER + "group_4.pkl")
result_trans, result_fixp = estimate(init_dict, df)
out["trans_est"] = result_trans["x"]
out["params_est"] = result_fixp["x"]
out["trans_ll"] = result_trans["fun"]
out["cost_ll"] = result_fixp["fun"]
out["states"] = df.loc[:, "state"].to_numpy(int)
out["decisions"] = df.loc[:, "decision"].to_numpy(int)
out["disc_fac"] = disc_fac
out["num_states"] = num_states
out["scale"] = scale
out["status"] = result_fixp["status"]
return out
@pytest.fixture(scope="module")
def outputs():
out = {}
out["trans_base"] = np.loadtxt(TEST_FOLDER + "repl_test_trans.txt")
out["params_base"] = np.loadtxt(TEST_FOLDER + "repl_params_cubic.txt")
out["transition_count"] = np.loadtxt(TEST_FOLDER + "transition_count.txt")
out["trans_ll"] = 3140.570557
out["cost_ll"] = 164.632939 # 162.885
return out
def test_repl_params(inputs, outputs):
| assert_array_almost_equal(inputs["params_est"], outputs["params_base"], decimal=3) | numpy.testing.assert_array_almost_equal |
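# Tolerance sketch for the decimal=3 comparison above: NumPy's
# assert_array_almost_equal requires abs(desired - actual) < 1.5 * 10**-3
# elementwise, so sub-millesimal drift in the estimates still passes.
from numpy.testing import assert_array_almost_equal
assert_array_almost_equal([1.0004], [1.0], decimal=3)  # within tolerance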
import numpy as np
import scipy as sp
from quaternion import from_rotation_matrix, quaternion, as_rotation_matrix
from rlbench.environment import Environment
from rlbench.action_modes import ArmActionMode, ActionMode
from rlbench.observation_config import ObservationConfig, CameraConfig
from rlbench.tasks import *
from pyrep.const import ConfigurationPathAlgorithms as Algos
from grasp_planner import GraspPlanner
from perception import CameraIntrinsics
from object_detector import container_detector
import cv2
import matplotlib.pyplot as plt
import time
def skew(x):
return np.array([[0, -x[2], x[1]],
[x[2], 0, -x[0]],
[-x[1], x[0], 0]])
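# skew(x) is the cross-product matrix: skew(x) @ v equals np.cross(x, v).
# NumPy-only sanity sketch:
_x = np.array([1.0, 2.0, 3.0])
_v = np.array([-1.0, 0.5, 2.0])
assert np.allclose(np.dot(skew(_x), _v), np.cross(_x, _v))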
def sample_normal_pose(pos_scale, rot_scale):
'''
Samples a 6D pose from a zero-mean isotropic normal distribution
'''
pos = np.random.normal(scale=pos_scale)
eps = skew(np.random.normal(scale=rot_scale))
R = sp.linalg.expm(eps)
quat_wxyz = from_rotation_matrix(R)
return pos, quat_wxyz
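# sample_normal_pose maps a small random axis-angle vector to a rotation via
# the matrix exponential of its skew matrix (Rodrigues' formula). Sketch:
# a pi/2 rotation about z sends the x axis to the y axis.
_Rz = sp.linalg.expm(skew(np.array([0.0, 0.0, np.pi / 2])))
assert np.allclose(np.dot(_Rz, np.array([1.0, 0.0, 0.0])), np.array([0.0, 1.0, 0.0]))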
def noisy_object(pose):
_pos_scale = [0.005] * 3
_rot_scale = [0.01] * 3
pos, quat_wxyz = sample_normal_pose(_pos_scale, _rot_scale)
gt_quat_wxyz = quaternion(pose[6], pose[3], pose[4], pose[5])
perturbed_quat_wxyz = quat_wxyz * gt_quat_wxyz
pose[:3] += pos
pose[3:] = [perturbed_quat_wxyz.x, perturbed_quat_wxyz.y, perturbed_quat_wxyz.z, perturbed_quat_wxyz.w]
return pose
class GraspController:
def __init__(self, action_mode, static_positions=True):
# Initialize environment with Action mode and observations
# Resize the wrist camera to fit the GQCNN
wrist_camera = CameraConfig(image_size=(1032, 772))
self.env = Environment(action_mode, '', ObservationConfig(wrist_camera=wrist_camera), False, static_positions=static_positions)
self.env.launch()
# Load specified task into the environment
self.task = self.env.get_task(EmptyContainer)
def reset(self):
descriptions, obs = self.task.reset()
return descriptions, obs
def get_objects(self, add_noise=False):
objs = self.env._scene._active_task.get_base().get_objects_in_tree(exclude_base=True, first_generation_only=False)
objs_dict = {}
for obj in objs:
name = obj.get_name()
pose = obj.get_pose()
if add_noise:
pose = noisy_object(pose)
objs_dict[name] = [obj, pose]
return objs_dict
def get_path(self, pose, set_orientation=False):
# TODO deal with situations when path not found
if set_orientation:
path = self.env._robot.arm.get_path(pose[:3], quaternion=pose[3:],
ignore_collisions=True, algorithm=Algos.RRTConnect, trials=1000)
else:
path = self.env._robot.arm.get_path(pose[:3], quaternion=np.array([0, 1, 0, 0]),
ignore_collisions=True, algorithm=Algos.RRTConnect, trials=1000)
return path
def grasp(self):
# TODO get feedback to check if grasp is successful
done_grab_action = False
# Repeat until the object is successfully grabbed
while not done_grab_action:
# gradually close the gripper
done_grab_action = self.env._robot.gripper.actuate(0, velocity=0.2) # 0 is close
self.env._pyrep.step()
# self.task._task.step()
# self.env._scene.step()
grasped_objects = {}
obj_list = ['Shape', 'Shape1', 'Shape3']
objs = self.env._scene._active_task.get_base().get_objects_in_tree(exclude_base=True, first_generation_only=False)
for obj in objs:
if obj.get_name() in obj_list:
grasped_objects[obj.get_name()] = self.env._robot.gripper.grasp(obj)
return grasped_objects
# return self.env._robot.gripper.get_grasped_objects()
def release(self):
done = False
while not done:
done = self.env._robot.gripper.actuate(1, velocity=0.2) # 1 is release
self.env._pyrep.step()
# self.task._task.step()
# self.env._scene.step()
self.env._robot.gripper.release()
def execute_path(self, path, open_gripper=True):
path = path._path_points.reshape(-1, path._num_joints)
for i in range(len(path)):
action = list(path[i]) + [int(open_gripper)]
obs, reward, terminate = self.task.step(action)
return obs, reward, terminate
### The following code works as well ###
# done = False
# path.set_to_start()
# while not done:
# done = path.step()
# a = path.visualize()
# self.env._scene.step()
# return done
if __name__ == "__main__":
# Get grasp planner using GQCNN
grasp_planner = GraspPlanner(model="GQCNN-2.0")
# Get large container empty detector
large_container_detector = container_detector(model='large_container_detector_model.pth')
# Get small container empty detector
small_container_detector = container_detector(model='small_container_detector_model.pth')
# Set Action Mode, See rlbench/action_modes.py for other action modes
action_mode = ActionMode(ArmActionMode.ABS_JOINT_POSITION)
# Create grasp controller with initialized environment and task
grasp_controller = GraspController(action_mode, static_positions=True)
# Reset task
descriptions, obs = grasp_controller.reset()
# The camera intrinsic in RLBench
camera_intr = CameraIntrinsics(fx=893.738, fy=893.738, cx=516, cy=386, frame='world', height=772, width=1032)
# The translation between camera and gripper
# TODO: Change the whole logic into detecting the object using GQCNN
object_initial_poses = {}
while True:
camera_to_gripper_translation = [0.022, 0, 0.095]
while True:
objs = grasp_controller.get_objects(add_noise=True)
# Go to home position
home_pose = np.copy(objs['waypoint0'][1])
home_pose[0] -= 0.022
path = grasp_controller.get_path(home_pose)
obs, reward, terminate = grasp_controller.execute_path(path, open_gripper=True)
# Scale the image and change the type to uint8 to fit the neural network
rgb = np.array(obs.wrist_rgb * 255, dtype='uint8')
# Change the image to BGR to fit the neural network
# p.s. The network is trained on BGR images
wrist_image = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
# Use network with trained model to check if the large container is empty or not
detector_start = time.time()
container_is_empty = large_container_detector.check_empty(image=wrist_image)
plt.figure(figsize=(8, 8))
plt.imshow(cv2.cvtColor(wrist_image, cv2.COLOR_BGR2RGB))
if container_is_empty:
plt.title('The large container is empty? \n Prediction Result: True. Time used: {0:.2f}sec '
'\n Forward Finished, Start Resetting'.format(time.time()-detector_start))
plt.show()
break
else:
plt.title('The large container is empty? \n Prediction Result: False. Time used: {0:.2f}sec '
'\n Continue Grasping'.format(time.time() - detector_start))
plt.show()
# Take depth picture and use GQCNN to predict grasping pose
# p.s. Need to scale the depth by 10 to fit GQCNN
depth = obs.wrist_depth*10
# Get the grasping pose relative to the current camera position (home position)
grasping_pose = np.copy(grasp_planner.plan_grasp(depth, rgb, camera_intr=camera_intr))
# Convert the relative grasping position to global grasping position
grasping_pose[:3] += home_pose[:3]
# Add extra distance between camera and gripper
grasping_pose[:3] += camera_to_gripper_translation
# Getting the path of reaching the target position
path = grasp_controller.get_path(grasping_pose, set_orientation=True)
# Execute the path
obs, reward, terminate = grasp_controller.execute_path(path, open_gripper=True)
# grasp the object and return a list of grasped objects
grasped_objects = grasp_controller.grasp()
print('Object grasping status:', grasped_objects)
for object in grasped_objects:
if grasped_objects[object]:
object_initial_poses[object] = grasping_pose
# move to home position
pose = np.copy(objs['waypoint0'][1])
path = grasp_controller.get_path(pose)
obs, reward, terminate = grasp_controller.execute_path(path, open_gripper=False)
# move above small container
rot = np.dot(as_rotation_matrix(quaternion(0, 0, 1, 0)),
np.array([[np.cos(np.pi / 2), -np.sin(np.pi / 2), 0],
[np.sin(np.pi / 2), np.cos(np.pi / 2), 0],
[0, 0, 1]]))
quat_wxyz = from_rotation_matrix(rot)
quat = np.array([quat_wxyz.x, quat_wxyz.y, quat_wxyz.z, quat_wxyz.w])
pose = np.copy(objs['waypoint3'][1])
pose[3:] = quat
path = grasp_controller.get_path(pose, set_orientation=True)
obs, reward, terminate = grasp_controller.execute_path(path, open_gripper=False)
pose[2] -= 0.15
path = grasp_controller.get_path(pose, set_orientation=True)
obs, reward, terminate = grasp_controller.execute_path(path, open_gripper=False)
# release the object
grasp_controller.release()
# move above small container
pose = np.copy(objs['waypoint3'][1])
pose[3:] = quat
path = grasp_controller.get_path(pose, set_orientation=True)
obs, reward, terminate = grasp_controller.execute_path(path, open_gripper=True)
break
camera_to_gripper_translation = [-0.013, -0.028, 0.1]
# TODO reset the task
while True:
objs = grasp_controller.get_objects(add_noise=True)
# move above small container
home_pose = np.copy(objs['waypoint3'][1])
home_pose[0] -= 0.01
home_pose[1] += 0.028
home_pose[2] -= 0.13
rot = np.dot(as_rotation_matrix(quaternion(0, 0, 1, 0)),
np.array([[np.cos(np.pi / 2), -np.sin(np.pi / 2), 0],
[np.sin(np.pi / 2), np.cos(np.pi / 2), 0],
[0, 0, 1]]))
quat_wxyz = from_rotation_matrix(rot)
grasping_quaternion = np.array([quat_wxyz.x, quat_wxyz.y, quat_wxyz.z, quat_wxyz.w])
home_pose[3:] = grasping_quaternion
path = grasp_controller.get_path(home_pose, set_orientation=True)
obs, reward, terminate = grasp_controller.execute_path(path, open_gripper=True)
# Get the rgb image and scale it by 255
rgb = | np.array(obs.wrist_rgb * 255, dtype='uint8') | numpy.array |
import collections
import numpy as np
from .base import ClassifierModule, LMModule, NERModule, MRCModule
from ..model.base import ClsDecoder, BinaryClsDecoder, SeqClsDecoder, SeqClsCrossDecoder, MRCDecoder
from ..model.bert import BERTEncoder, BERTDecoder, BERTConfig, create_instances_from_document, create_masked_lm_predictions, get_decay_power
from ..model.crf import CRFDecoder, viterbi_decode
from ..token import WordPieceTokenizer
from ..third import tf
from .. import com
class BERTClassifier(ClassifierModule):
""" Single-label classifier on BERT. """
_INFER_ATTRIBUTES = {
"max_seq_length": "An integer that defines max sequence length of input tokens",
"label_size": "An integer that defines number of possible labels of outputs",
"init_checkpoint": "A string that directs to the checkpoint file used for initialization",
}
def __init__(
self,
config_file,
vocab_file,
max_seq_length=128,
label_size=None,
init_checkpoint=None,
output_dir=None,
gpu_ids=None,
drop_pooler=False,
do_lower_case=True,
truncate_method="LIFO",
):
self.__init_args__ = locals()
super(ClassifierModule, self).__init__(init_checkpoint, output_dir, gpu_ids)
self.batch_size = 0
self.max_seq_length = max_seq_length
self.label_size = label_size
self.truncate_method = truncate_method
self._drop_pooler = drop_pooler
self._id_to_label = None
self.bert_config = BERTConfig.from_json_file(config_file)
self.tokenizer = WordPieceTokenizer(vocab_file, do_lower_case)
self.decay_power = get_decay_power(self.bert_config.num_hidden_layers)
if "[CLS]" not in self.tokenizer.vocab:
self.tokenizer.add("[CLS]")
self.bert_config.vocab_size += 1
tf.logging.info("Add necessary token `[CLS]` into vocabulary.")
if "[SEP]" not in self.tokenizer.vocab:
self.tokenizer.add("[SEP]")
self.bert_config.vocab_size += 1
tf.logging.info("Add necessary token `[SEP]` into vocabulary.")
def convert(self, X=None, y=None, sample_weight=None, X_tokenized=None, is_training=False, is_parallel=False):
self._assert_legal(X, y, sample_weight, X_tokenized)
if is_training:
assert y is not None, "`y` can't be None."
if is_parallel:
assert self.label_size, "Can't parse data on multi-processing when `label_size` is None."
n_inputs = None
data = {}
# convert X
if X or X_tokenized:
tokenized = False if X else X_tokenized
input_ids, input_mask, segment_ids = self._convert_X(X_tokenized if tokenized else X, tokenized=tokenized)
data["input_ids"] = np.array(input_ids, dtype=np.int32)
data["input_mask"] = np.array(input_mask, dtype=np.int32)
data["segment_ids"] = np.array(segment_ids, dtype=np.int32)
n_inputs = len(input_ids)
if n_inputs < self.batch_size:
self.batch_size = max(n_inputs, len(self._gpu_ids))
# convert y
if y:
label_ids = self._convert_y(y)
data["label_ids"] = np.array(label_ids, dtype=np.int32)
# convert sample_weight
if is_training or y:
sample_weight = self._convert_sample_weight(sample_weight, n_inputs)
data["sample_weight"] = np.array(sample_weight, dtype=np.float32)
return data
def _convert_X(self, X_target, tokenized):
# tokenize input texts
segment_input_tokens = []
for idx, sample in enumerate(X_target):
try:
segment_input_tokens.append(self._convert_x(sample, tokenized))
except Exception:
raise ValueError("Wrong input format (line %d): \"%s\". " % (idx, sample))
input_ids = []
input_mask = []
segment_ids = []
for idx, segments in enumerate(segment_input_tokens):
_input_tokens = ["[CLS]"]
_input_ids = []
_input_mask = [1]
_segment_ids = [0]
com.truncate_segments(segments, self.max_seq_length - len(segments) - 1, truncate_method=self.truncate_method)
for s_id, segment in enumerate(segments):
_segment_id = min(s_id, 1)
_input_tokens.extend(segment + ["[SEP]"])
_input_mask.extend([1] * (len(segment) + 1))
_segment_ids.extend([_segment_id] * (len(segment) + 1))
_input_ids = self.tokenizer.convert_tokens_to_ids(_input_tokens)
# padding
for _ in range(self.max_seq_length - len(_input_ids)):
_input_ids.append(0)
_input_mask.append(0)
_segment_ids.append(0)
input_ids.append(_input_ids)
input_mask.append(_input_mask)
segment_ids.append(_segment_ids)
return input_ids, input_mask, segment_ids
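# Layout sketch for the features built above (hypothetical two-segment
# input; wordpiece details elided):
#   tokens:      [CLS] w1 w2 [SEP] w3 w4 [SEP] [PAD] ...
#   segment_ids: 0     0  0  0     1  1  1     0     ...
#   input_mask:  1     1  1  1     1  1  1     0     ...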
def _convert_x(self, x, tokenized):
if not tokenized:
# deal with general inputs
if isinstance(x, str):
return [self.tokenizer.tokenize(x)]
# deal with multiple inputs
return [self.tokenizer.tokenize(seg) for seg in x]
# deal with tokenized inputs
if isinstance(x[0], str):
return [x]
# deal with tokenized and multiple inputs
return x
def _convert_y(self, y):
label_set = set(y)
# automatically set `label_size`
if self.label_size:
assert len(label_set) <= self.label_size, "Number of unique `y`s exceeds `label_size`."
else:
self.label_size = len(label_set)
# automatically set `id_to_label`
if not self._id_to_label:
self._id_to_label = list(label_set)
try:
# Align if the user inputs consecutive integers,
# e.g. [2, 0, 1]
self._id_to_label = list(sorted(self._id_to_label))
except Exception:
pass
if len(self._id_to_label) < self.label_size:
self._id_to_label = list(range(self.label_size))
# automatically set `label_to_id` for prediction
self._label_to_id = {label: index for index, label in enumerate(self._id_to_label)}
label_ids = [self._label_to_id[label] for label in y]
return label_ids
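# Mapping sketch for _convert_y (hypothetical labels): y = ["b", "a", "b"]
# yields _id_to_label == ["a", "b"] after sorting, _label_to_id == {"a": 0,
# "b": 1}, and label_ids == [1, 0, 1].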
def _set_placeholders(self, target, on_export=False, **kwargs):
self.placeholders = {
"input_ids": com.get_placeholder(target, "input_ids", [None, self.max_seq_length], tf.int32),
"input_mask": com.get_placeholder(target, "input_mask", [None, self.max_seq_length], tf.int32),
"segment_ids": com.get_placeholder(target, "segment_ids", [None, self.max_seq_length], tf.int32),
"label_ids": com.get_placeholder(target, "label_ids", [None], tf.int32),
}
if not on_export:
self.placeholders["sample_weight"] = com.get_placeholder(target, "sample_weight", [None], tf.float32)
def _forward(self, is_training, split_placeholders, **kwargs):
encoder = BERTEncoder(
bert_config=self.bert_config,
is_training=is_training,
input_ids=split_placeholders["input_ids"],
input_mask=split_placeholders["input_mask"],
segment_ids=split_placeholders["segment_ids"],
drop_pooler=self._drop_pooler,
**kwargs,
)
encoder_output = encoder.get_pooled_output()
decoder = ClsDecoder(
is_training=is_training,
input_tensor=encoder_output,
label_ids=split_placeholders["label_ids"],
label_size=self.label_size,
sample_weight=split_placeholders.get("sample_weight"),
scope="cls/seq_relationship",
**kwargs,
)
return decoder.get_forward_outputs()
def _get_fit_ops(self, as_feature=False):
ops = [self._tensors["preds"], self._tensors["losses"]]
if as_feature:
ops.extend([self.placeholders["label_ids"]])
return ops
def _get_fit_info(self, output_arrays, feed_dict, as_feature=False):
if as_feature:
batch_labels = output_arrays[-1]
else:
batch_labels = feed_dict[self.placeholders["label_ids"]]
# accuracy
batch_preds = output_arrays[0]
accuracy = np.mean(batch_preds == batch_labels)
# loss
batch_losses = output_arrays[1]
loss = np.mean(batch_losses)
info = ""
info += ", accuracy %.4f" % accuracy
info += ", loss %.6f" % loss
return info
def _get_predict_ops(self):
return [self._tensors["probs"]]
def _get_predict_outputs(self, batch_outputs):
n_inputs = len(list(self.data.values())[0])
output_arrays = list(zip(*batch_outputs))
# probs
probs = com.transform(output_arrays[0], n_inputs)
# preds
preds = np.argmax(probs, axis=-1).tolist()
if self._id_to_label:
preds = [self._id_to_label[idx] for idx in preds]
outputs = {}
outputs["preds"] = preds
outputs["probs"] = probs
return outputs
def _get_score_ops(self):
return [self._tensors["preds"], self._tensors["losses"]]
def _get_score_outputs(self, batch_outputs):
n_inputs = len(list(self.data.values())[0])
output_arrays = list(zip(*batch_outputs))
# accuracy
preds = com.transform(output_arrays[0], n_inputs)
labels = self.data["label_ids"]
accuracy = np.mean(preds == labels)
# loss
losses = com.transform(output_arrays[1], n_inputs)
loss = np.mean(losses)
outputs = {}
outputs["accuracy"] = accuracy
outputs["loss"] = loss
return outputs
class BERTBinaryClassifier(BERTClassifier, ClassifierModule):
""" Multi-label classifier on BERT. """
_INFER_ATTRIBUTES = BERTClassifier._INFER_ATTRIBUTES
def __init__(
self,
config_file,
vocab_file,
max_seq_length=128,
label_size=None,
label_weight=None,
init_checkpoint=None,
output_dir=None,
gpu_ids=None,
drop_pooler=False,
do_lower_case=True,
truncate_method="LIFO",
):
self.__init_args__ = locals()
super(ClassifierModule, self).__init__(init_checkpoint, output_dir, gpu_ids)
self.batch_size = 0
self.max_seq_length = max_seq_length
self.label_size = label_size
self.label_weight = label_weight
self.truncate_method = truncate_method
self._drop_pooler = drop_pooler
self._id_to_label = None
self.bert_config = BERTConfig.from_json_file(config_file)
self.tokenizer = WordPieceTokenizer(vocab_file, do_lower_case)
self.decay_power = get_decay_power(self.bert_config.num_hidden_layers)
if "[CLS]" not in self.tokenizer.vocab:
self.tokenizer.add("[CLS]")
self.bert_config.vocab_size += 1
tf.logging.info("Add necessary token `[CLS]` into vocabulary.")
if "[SEP]" not in self.tokenizer.vocab:
self.tokenizer.add("[SEP]")
self.bert_config.vocab_size += 1
tf.logging.info("Add necessary token `[SEP]` into vocabulary.")
def _convert_y(self, y):
try:
label_set = set()
for sample in y:
_label_set = set()
for _y in sample:
assert _y not in _label_set
label_set.add(_y)
_label_set.add(_y)
except Exception:
raise ValueError("The element of `y` should be a list of multiple answers. E.g. y=[[1, 3], [0], [0, 2]].")
# automatically set `label_size`
if self.label_size:
assert len(label_set) <= self.label_size, "Number of unique labels exceeds `label_size`."
else:
self.label_size = len(label_set)
# automatically set `id_to_label`
if not self._id_to_label:
self._id_to_label = list(label_set)
try:
                # Align if the user inputs consecutive integers.
# e.g. [2, 0, 1]
self._id_to_label = list(sorted(self._id_to_label))
except Exception:
pass
if len(self._id_to_label) < self.label_size:
self._id_to_label = list(range(self.label_size))
# automatically set `label_to_id` for prediction
self._label_to_id = {label: index for index, label in enumerate(self._id_to_label)}
label_ids = [[1 if self._id_to_label[i] in sample else 0 for i in range(self.label_size)] for sample in y]
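        # Worked example (illustrative): with y = [[1, 3], [0], [0, 2]] and
        # label_size = 4, the multi-hot rows are
        # [[0, 1, 0, 1], [1, 0, 0, 0], [1, 0, 1, 0]].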
return label_ids
def _set_placeholders(self, target, on_export=False, **kwargs):
self.placeholders = {
"input_ids": com.get_placeholder(target, "input_ids", [None, self.max_seq_length], tf.int32),
"input_mask": com.get_placeholder(target, "input_mask", [None, self.max_seq_length], tf.int32),
"segment_ids": com.get_placeholder(target, "segment_ids", [None, self.max_seq_length], tf.int32),
"label_ids": com.get_placeholder(target, "label_ids", [None, self.label_size], tf.int32),
}
if not on_export:
self.placeholders["sample_weight"] = com.get_placeholder(target, "sample_weight", [None], tf.float32)
def _forward(self, is_training, split_placeholders, **kwargs):
encoder = BERTEncoder(
bert_config=self.bert_config,
is_training=is_training,
input_ids=split_placeholders["input_ids"],
input_mask=split_placeholders["input_mask"],
segment_ids=split_placeholders["segment_ids"],
drop_pooler=self._drop_pooler,
**kwargs,
)
encoder_output = encoder.get_pooled_output()
decoder = BinaryClsDecoder(
is_training=is_training,
input_tensor=encoder_output,
label_ids=split_placeholders["label_ids"],
label_size=self.label_size,
sample_weight=split_placeholders.get("sample_weight"),
label_weight=self.label_weight,
scope="cls/seq_relationship",
**kwargs,
)
return decoder.get_forward_outputs()
def _get_predict_ops(self):
return [self._tensors["probs"]]
def _get_predict_outputs(self, batch_outputs):
n_inputs = len(list(self.data.values())[0])
output_arrays = list(zip(*batch_outputs))
# probs
probs = com.transform(output_arrays[0], n_inputs)
# preds
preds = (probs >= 0.5)
if self._id_to_label:
preds = [[self._id_to_label[i] for i in range(self.label_size) if _preds[i]] for _preds in preds]
else:
preds = [[i for i in range(self.label_size) if _preds[i]] for _preds in preds]
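        # Illustration: with label_size = 3 and a probs row of
        # [0.9, 0.2, 0.6], the 0.5 threshold keeps labels 0 and 2.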
outputs = {}
outputs["preds"] = preds
outputs["probs"] = probs
return outputs
class BERTSeqClassifier(BERTClassifier, ClassifierModule):
""" Sequence labeling classifier on BERT. """
_INFER_ATTRIBUTES = BERTClassifier._INFER_ATTRIBUTES
def __init__(
self,
config_file,
vocab_file,
max_seq_length=128,
label_size=None,
init_checkpoint=None,
output_dir=None,
gpu_ids=None,
do_lower_case=True,
truncate_method="LIFO",
):
self.__init_args__ = locals()
super(ClassifierModule, self).__init__(init_checkpoint, output_dir, gpu_ids)
self.batch_size = 0
self.max_seq_length = max_seq_length
self.label_size = label_size
self.truncate_method = truncate_method
self._id_to_label = None
self.bert_config = BERTConfig.from_json_file(config_file)
self.tokenizer = WordPieceTokenizer(vocab_file, do_lower_case)
self.decay_power = get_decay_power(self.bert_config.num_hidden_layers)
def convert(self, X=None, y=None, sample_weight=None, X_tokenized=None, is_training=False, is_parallel=False):
self._assert_legal(X, y, sample_weight, X_tokenized)
if is_training:
assert y is not None, "`y` can't be None."
if is_parallel:
assert self.label_size, "Can't parse data on multi-processing when `label_size` is None."
n_inputs = None
data = {}
# convert X
if X or X_tokenized:
tokenized = False if X else X_tokenized
input_ids, input_mask, segment_ids = self._convert_X(X_tokenized if tokenized else X, tokenized=tokenized)
data["input_ids"] = np.array(input_ids, dtype=np.int32)
data["input_mask"] = np.array(input_mask, dtype=np.int32)
data["segment_ids"] = np.array(segment_ids, dtype=np.int32)
n_inputs = len(input_ids)
if n_inputs < self.batch_size:
self.batch_size = max(n_inputs, len(self._gpu_ids))
if y:
# convert y and sample_weight
label_ids = self._convert_y(y)
data["label_ids"] = np.array(label_ids, dtype=np.int32)
# convert sample_weight
if is_training or y:
sample_weight = self._convert_sample_weight(sample_weight, n_inputs)
data["sample_weight"] = np.array(sample_weight, dtype=np.float32)
return data
def _convert_X(self, X_target, tokenized):
input_ids = []
input_mask = []
segment_ids = []
# tokenize input texts
for idx, sample in enumerate(X_target):
_input_tokens = self._convert_x(sample, tokenized)
com.truncate_segments([_input_tokens], self.max_seq_length, truncate_method=self.truncate_method)
_input_ids = self.tokenizer.convert_tokens_to_ids(_input_tokens)
_input_mask = [1 for _ in range(len(_input_tokens))]
_segment_ids = [0 for _ in range(len(_input_tokens))]
# padding
for _ in range(self.max_seq_length - len(_input_ids)):
_input_ids.append(0)
_input_mask.append(0)
_segment_ids.append(0)
input_ids.append(_input_ids)
input_mask.append(_input_mask)
segment_ids.append(_segment_ids)
return input_ids, input_mask, segment_ids
def _convert_x(self, x, tokenized):
if not tokenized:
raise ValueError("Inputs of sequence classifier must be already tokenized and fed into `X_tokenized`.")
# deal with tokenized inputs
if isinstance(x[0], str):
return x
# deal with tokenized and multiple inputs
raise ValueError("Sequence classifier does not support multi-segment inputs.")
def _convert_y(self, y):
try:
label_set = set()
for sample in y:
for _y in sample:
label_set.add(_y)
except Exception:
raise ValueError("The element of `y` should be a list of labels.")
# automatically set `label_size`
if self.label_size:
assert len(label_set) <= self.label_size, "Number of unique `y`s exceeds `label_size`."
else:
self.label_size = len(label_set)
# automatically set `id_to_label`
if not self._id_to_label:
self._id_to_label = list(label_set)
try:
                # Align if the user inputs consecutive integers.
# e.g. [2, 0, 1]
self._id_to_label = list(sorted(self._id_to_label))
except Exception:
pass
if len(self._id_to_label) < self.label_size:
self._id_to_label = list(range(self.label_size))
# automatically set `label_to_id` for prediction
self._label_to_id = {label: index for index, label in enumerate(self._id_to_label)}
label_ids = []
for sample in y:
            sample = list(sample)
num_labels = len(sample)
if num_labels < self.max_seq_length:
sample.extend([0] * (self.max_seq_length - num_labels))
elif num_labels > self.max_seq_length:
sample = sample[:self.max_seq_length]
com.truncate_segments([sample], self.max_seq_length, truncate_method=self.truncate_method)
_label_ids = [self._label_to_id[label] for label in sample]
label_ids.append(_label_ids)
return label_ids
def _set_placeholders(self, target, on_export=False, **kwargs):
self.placeholders = {
"input_ids": com.get_placeholder(target, "input_ids", [None, self.max_seq_length], tf.int32),
"input_mask": com.get_placeholder(target, "input_mask", [None, self.max_seq_length], tf.int32),
"segment_ids": com.get_placeholder(target, "segment_ids", [None, self.max_seq_length], tf.int32),
"label_ids": com.get_placeholder(target, "label_ids", [None, self.max_seq_length], tf.int32),
}
if not on_export:
self.placeholders["sample_weight"] = com.get_placeholder(target, "sample_weight", [None], tf.float32)
def _forward(self, is_training, split_placeholders, **kwargs):
encoder = BERTEncoder(
bert_config=self.bert_config,
is_training=is_training,
input_ids=split_placeholders["input_ids"],
input_mask=split_placeholders["input_mask"],
segment_ids=split_placeholders["segment_ids"],
**kwargs,
)
encoder_output = encoder.get_sequence_output()
decoder = SeqClsDecoder(
is_training=is_training,
input_tensor=encoder_output,
input_mask=split_placeholders["input_mask"],
label_ids=split_placeholders["label_ids"],
label_size=self.label_size,
sample_weight=split_placeholders.get("sample_weight"),
scope="cls/sequence",
**kwargs,
)
return decoder.get_forward_outputs()
def _get_fit_ops(self, as_feature=False):
ops = [self._tensors["preds"], self._tensors["losses"]]
if as_feature:
ops.extend([self.placeholders["input_mask"], self.placeholders["label_ids"]])
return ops
def _get_fit_info(self, output_arrays, feed_dict, as_feature=False):
if as_feature:
batch_mask = output_arrays[-2]
batch_labels = output_arrays[-1]
else:
batch_mask = feed_dict[self.placeholders["input_mask"]]
batch_labels = feed_dict[self.placeholders["label_ids"]]
# accuracy
batch_preds = output_arrays[0]
accuracy = (np.sum((batch_preds == batch_labels) * batch_mask) / batch_mask.sum())
# loss
batch_losses = output_arrays[1]
loss = np.mean(batch_losses)
info = ""
info += ", accuracy %.4f" % accuracy
info += ", loss %.6f" % loss
return info
def _get_predict_ops(self):
return [self._tensors["probs"]]
def _get_predict_outputs(self, batch_outputs):
n_inputs = len(list(self.data.values())[0])
output_arrays = list(zip(*batch_outputs))
# probs
probs = com.transform(output_arrays[0], n_inputs)
# preds
all_preds = np.argmax(probs, axis=-1)
mask = self.data["input_mask"]
preds = []
for _preds, _mask in zip(all_preds, mask):
input_length = np.sum(_mask)
_preds = _preds[:input_length].tolist()
if self._id_to_label:
_preds = [self._id_to_label[idx] for idx in _preds]
preds.append(_preds)
outputs = {}
outputs["preds"] = preds
outputs["probs"] = probs
return outputs
def _get_score_ops(self):
return [self._tensors["preds"], self._tensors["losses"]]
def _get_score_outputs(self, batch_outputs):
n_inputs = len(list(self.data.values())[0])
output_arrays = list(zip(*batch_outputs))
# accuracy
preds = com.transform(output_arrays[0], n_inputs)
labels = self.data["label_ids"]
mask = self.data["input_mask"]
accuracy = (np.sum((preds == labels) * mask) / mask.sum())
# loss
losses = com.transform(output_arrays[1], n_inputs)
        loss = np.mean(losses)
        outputs = {}
        outputs["accuracy"] = accuracy
        outputs["loss"] = loss
        return outputs
import numpy as np
from dolfin import *
import logging
import pnptransport.utils as utils
# import traceback
# import sys
# from logwriter import LoggerWriter
import h5py
import os
from typing import Union
import scipy.constants as constants
from scipy import integrate
# import configparser
# parameters['form_compiler']['optimize'] = True
# parameters['form_compiler']['cpp_optimize'] = True
# parameters['form_compiler']['cpp_optimize_flags'] = '-O3'
q_red = 1.6021766208 # x 1E-19 C
e0_red = 8.854187817620389 # x 1E-12 C^2 / J m
CM3TOUM3 = 1E-12
def two_layers_constant_source(D1cms: float, D2cms: float, Cs: float, h: float,
m: float, thickness_sinx: float,
thickness_si: float, tempC: float,
voltage: float, time_s: Union[float, int],
recovery_time_s: Union[float, int] = 0,
recovery_voltage: float = 0,
**kwargs):
"""
This function simulates the flatband voltage as a function of time for a
MIS device where Na is migrating into the cell. It also returns a matrix
with the concentration profiles as a function of time.
The system solves Poisson-Nernst-Planck equation for a single species.
*Example*
.. code-block:: python
import pnptransport.infinitesource as pnpis
import logging
D1, D2 = 1E-16, 1E-15
Cs = 1E16
h, m = 1E-12, 1.0
thickness_1, thickness_2 = 75E-7, 1.
temp_c = 60.
voltage = 0.75
time_s = 86400.
h5FileName = 'simulation_output.h5'
# Chose a small time step to reduce truncation error in the TR-BDF2
t_steps = 3600
# Create a logger
logFile = 'simulation_output.log'
my_logger = logging.getLogger('simlog')
my_logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler(logFile)
fh.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# add the handlers to the logger
my_logger.addHandler(fh)
my_logger.addHandler(ch)
        vfb, tsim, x1, c1, potential, x2, c2, cmax = pnpis.two_layers_constant_source(
D1cms=D1, D2cms=D2,
Cs=Cs, h=h, m=m, thickness_sinx=thickness_1, thickness_si=thickness_2,
tempC=temp_c, voltage=voltage, time_s=time_s,
tsteps=t_steps, h5_storage=h5FileName, er=7.0
)
Parameters
----------
D1cms: float
The diffusion coefficient of Na in the dielectric (cm\ :sup:`2`\/s)
D2cms: float
The diffusion coefficient of Na in silicon (cm\ :sup:`2`\/s)
Cs: float
The source concentration (1/cm\ :sup:`3`\)
h: float
The surface mass transfer coefficient (cm/s)
m: float
The segregation coefficient (unitless)
thickness_sinx: float
The thickness of the simulated dielectric layer (um)
thickness_si: float
The thickness of the simulated silicon layer (um)
tempC: Union[float, int]
The temperature in °C
voltage: Union[float, int]
The voltage applied to the dielectric (V)
time_s: Union[float, int]
The simulation time in seconds
recovery_time_s: Union[float, int]
An additional simulation time during which, no electrical stress is applied (s).
recovery_voltage: float
If provided a recovery time, the bias at which it will recover (V).
**kwargs:
cbulk: double
The base concentration cm\ :sup:`-3`\.
xpoints_sinx: int
The number of cells in the sinx layer.
xpoints_si: int
The number of cells in the si layer.
z: integer
The valency of the ion.
default: 1
er: double
The relative permittivity of the dielectric.
xpoints: int
The number of x points to simulate.
fcall: int
The number of times the function has been called to solve the same
problem.
tsteps: int
The number of time steps to simulate.
max_calls: int
The maximum number of times the function can be recursively call if the convergence fails.
max_iter: int
The maximum number of iterations for the solver.
relaxation_parameter: float
The relaxation w for the Newton algorithm.
t_smear: int, float
The time in seconds taken to "smooth" the initial profile (assuming a constant concentration diffused
over t_smear).
h5fn: str
The path to the h5 file to store the simulation results.
debug: bool
True if debugging the function.
Returns
-------
Vfb: np.ndarray
An array containing the flat band voltage shift as a function of time
in (V)
tsim: np.ndarray
The time for each flatband voltage point in seconds.
x1: np.ndarray
The depth of the concentration profile in SiNx in um.
c1: np.ndarray
The final concentration profile as a function of depth in SiNx in cm\ :sup:`-3`\.
potential: np.ndarray
The final potential profile as a function of depth in SiNx in V.
x2: np.ndarray
The depth of the concentration profile in Si in um.
c2: np.ndarray
The final concentration profile in Si in cm\ :sup:`-3`\.
cmax: float
The maximum concentration in silicon nitride in cm\ :sup:`-3`\.
"""
Cbulk = kwargs.get('cbulk', 1E-20)
xpoints_sinx = kwargs.get('xpoints_sinx', 1000)
xpoints_si = kwargs.get('xpoints_si', 1000)
fcall = kwargs.get('fcall', 1)
tsteps = kwargs.get('tsteps', 400)
    max_calls = kwargs.get('max_calls', 3)
max_iter = kwargs.get('max_iter', 500)
er = kwargs.get('er', 7.0)
z = kwargs.get('z', 1.0)
t_smear = kwargs.get('t_smear', 60)
h5fn = kwargs.get('h5_storage', None)
debug = kwargs.get('debug', False)
relaxation_parameter = kwargs.get('relaxation_parameter', 1.0)
fcallLogger = logging.getLogger('simlog')
# Chose the backend type
if has_linear_algebra_backend("PETSc"):
parameters["linear_algebra_backend"] = "PETSc"
# print('PETSc linear algebra backend found.')
elif has_linear_algebra_backend("Eigen"):
parameters["linear_algebra_backend"] = "Eigen"
else:
fcallLogger.warning("DOLFIN has not been configured with PETSc or Eigen.")
exit()
L1 = thickness_sinx # thickness*1.05
L2 = thickness_si
L = L1 + L2
M1 = xpoints_sinx
M2 = xpoints_si
N = tsteps
E = voltage / thickness_sinx / 100 / er
dt = time_s / N
# Estimate the diffusion coefficients for the given temperature
tempK = tempC + 273.15
# Transform everything to um, s
D1ums = D1cms * 1E8
D2ums = D2cms * 1E8
hums = h * 1E4 # ums/s
# If the diffusion coefficient is negative, something is wrong! Return an array with zeros
if D1cms <= 0:
vfb = np.zeros(N + 1)
x1 = np.linspace(0, thickness_sinx, M1)
x2 = np.linspace(thickness_sinx, thickness_sinx + thickness_si, M2)
c1 = np.ones(M1) * Cbulk
        c2 = np.ones(M2) * Cbulk * m
potential = voltage - x1 * voltage / thickness_sinx
t_sim = np.linspace(0, time_s, (N + 1))
return vfb, t_sim, x1, c1, potential, x2, c2, Cbulk
# The constant mobility in um/s/V
mu1 = z * constants.elementary_charge * D1ums / (constants.Boltzmann * tempK)
# mu2 = 0.0
# The constant ze/(epsilon0,*er) in V*um
qee = z * constants.elementary_charge / (er * constants.epsilon_0) * 1E6
# MV/cm x (10^6 V / 1 MV) x ( 10^2 cm / 1 m) = 10^8 V/m = 10^8 J/C/m
# cm2/s x (1 m / 10^2 cm)^2 = 10^-4 m^2/s
# J/C/m x C / J * m^2/s = m/s x (10^6 um / 1 m) = 10^6 um/s
vd1 = constants.elementary_charge * (E * 1E8) * (D1cms * 1E-4) * 1E6 / (constants.Boltzmann * tempK)
# vd2 = 0.0 # constants.elementary_charge*(E2*1E8)*(D2cms*1E-4)*1E6/(constants.Boltzmann*TempK)
set_log_level(50)
logging.getLogger('FFC').setLevel(logging.WARNING)
# config = configparser.ConfigParser()
# config['partial_fit'] = {
# 'Cs': Cs,
# 'D1cms': D1cms,
# 'D2cms': D2cms,
# 'h': h,
# 'm': m
# }
# fh = fcallLogger.handlers[0]
# logger_filename = fh.baseFilename
# partial_fit_filename = os.path.splitext(logger_filename)[0] + '_partial_fit.ini'
# with open(partial_fit_filename, 'w') as config_file:
# config.write(config_file)
if debug:
fcallLogger.info('********* Global parameters *********')
fcallLogger.info('-------------------------------------')
fcallLogger.info('Time: {0}'.format(utils.format_time_str(time_s)))
fcallLogger.info('Time step: {0}'.format(utils.format_time_str(time_s / tsteps)))
fcallLogger.info('Temperature: {0:.1f} °C'.format(tempC))
fcallLogger.info('Cs: {0:.4E} cm^-3'.format(Cs))
fcallLogger.info('h: {0:.4E} cm/s'.format(h))
fcallLogger.info('m: {0:.4E}'.format(m))
fcallLogger.info('Recovery time: {0}.'.format(utils.format_time_str(recovery_time_s)))
fcallLogger.info('*************** SiNx ******************')
fcallLogger.info('Thickness: {0:.1E} um'.format(thickness_sinx))
fcallLogger.info('er: {0:.1f}'.format(er))
fcallLogger.info('Voltage: {0:.1f} V'.format(voltage))
fcallLogger.info('Electric Field: {0:.3E} MV/cm'.format(E * er))
fcallLogger.info('Electric Field (Effective): {0:.3E} MV/cm'.format(E))
fcallLogger.info('D: {0:.3E} cm^2/s'.format(D1cms))
fcallLogger.info('Ionic mobility: {0:.3E} um^2/ V*s'.format(mu1))
fcallLogger.info('Drift velocity: {0:.3E} um/s'.format(vd1))
fcallLogger.info('**************** Si *******************')
fcallLogger.info('Thickness: {0:.1E} um'.format(thickness_si))
fcallLogger.info('er: {0:.1f}'.format(11.9))
fcallLogger.info('Voltage: {0:.1f} V'.format(0.0))
fcallLogger.info('Electric Field: {0:.3E} MV/cm'.format(0.0))
fcallLogger.info('Electric Field (Effective): {0:.3E} MV/cm'.format(0.0))
fcallLogger.info('D: {0:.3E} cm^2/s'.format(D2cms))
fcallLogger.info('Ionic mobility: {0:.3E} cm^2/ V*s'.format(0.0))
fcallLogger.info('Drift velocity: {0:.3E} cm/s'.format(0.0))
# Create classes for defining parts of the boundaries and the interior
# of the domain
tol = 1E-14
class Top(SubDomain):
def inside(self, x_, on_boundary):
return near(x_[0], 0.0, tol) and on_boundary
class InnerBoundary(SubDomain):
def inside(self, x_, on_boundary):
return near(x_[0], L1, tol) and on_boundary
class Bottom(SubDomain):
def inside(self, x_, on_boundary):
return near(x_[0], L, tol) and on_boundary
def get_solution_array1(mesh, sol):
c_, phi = sol.split()
xu = mesh.coordinates()
cu = c_.compute_vertex_values(mesh) * 1E12
pu = phi.compute_vertex_values(mesh)
xyz = np.array([(xu[j], cu[j], pu[j]) for j in range(len(xu))], dtype=[('x', 'd'), ('c', 'd'), ('phi', 'd')])
xyz.sort(order='x')
return xyz['x'], xyz['c'], xyz['phi']
def get_solution_array2(mesh, sol):
xu = mesh.coordinates()
yu = sol.compute_vertex_values(mesh) * 1E12
xy = np.array([(xu[j], yu[j]) for j in range(len(xu))], dtype=[('x', 'd'), ('y', 'd')])
xy.sort(order='x')
return xy['x'], xy['y']
top = Top()
bottom = Bottom()
innerBoundaryL = InnerBoundary()
innerBoundaryR = InnerBoundary()
# Create mesh and define function space
mesh1 = IntervalMesh(M1, 0.0, L1)
mesh2 = IntervalMesh(M2, L1, L)
nor = 2
dr = L1 * 0.2
for i in range(nor):
cell_markers = MeshFunction("bool", mesh1, mesh1.topology().dim(), False)
for cell in cells(mesh1):
p = cell.midpoint()
if p[0] >= L1 - dr or p[0] <= dr:
cell_markers[cell] = True
mesh1 = refine(mesh1, cell_markers)
dr = dr / 1.5
nor = 3
dr = L2 * 1.1
for i in range(nor):
cell_markers = MeshFunction("bool", mesh2, mesh2.topology().dim(), False)
for cell in cells(mesh2):
p = cell.midpoint()
if p[0] <= dr:
cell_markers[cell] = True
mesh2 = refine(mesh2, cell_markers)
dr = dr / 1.5
if debug:
fcallLogger.info('Refined meshes.')
gdim1 = len(mesh1.coordinates())
gdim2 = len(mesh2.coordinates())
fcallLogger.info('********** Mesh 1 **********')
fcallLogger.info('Elements: {0}'.format(gdim1))
fcallLogger.info('MIN DX: {0:.3E} um, MAX DX {1:.3E}'.format(mesh1.hmin(), mesh1.hmax()))
fcallLogger.info('********** Mesh 2 **********')
fcallLogger.info('Elements: {0}'.format(gdim2))
fcallLogger.info('MIN DX: {0:.3E} um, MAX DX {1:.3E}'.format(mesh2.hmin(), mesh2.hmax()))
# Initialize mesh function for boundary domains
boundaries1 = MeshFunction("size_t", mesh1, mesh1.topology().dim() - 1)
boundaries2 = MeshFunction("size_t", mesh2, mesh2.topology().dim() - 1)
boundaries1.set_all(0)
boundaries2.set_all(0)
top.mark(boundaries1, 1)
innerBoundaryL.mark(boundaries1, 2)
innerBoundaryR.mark(boundaries2, 1)
bottom.mark(boundaries2, 2)
# Define the measures
ds1 = Measure('ds', domain=mesh1, subdomain_data=boundaries1)
ds2 = Measure('ds', domain=mesh2, subdomain_data=boundaries2)
dx1 = Measure('dx', domain=mesh1, subdomain_data=boundaries1)
dx2 = Measure('dx', domain=mesh2, subdomain_data=boundaries2)
# Add some initial Na profile
# Define the initial concentration in both layers
# Dsmear = D1ums / 1000
# u1i = Expression(('c1+(cs-c1)*(1-erf(x[0]/(2*sqrt(D*t))))', '(1-x[0]/L)*Vapp/er'),
# c1=Cbulk * CM3TOUM3, L=L1, cs=Cs * 1E-12,
# D=Dsmear, t=t_smear,
# Vapp=float(voltage), er=er, degree=1)
# Just add background concentration
u1i = Expression(('cb', '(1-x[0]/L)*Vapp/er'),
cb=Cbulk * CM3TOUM3, L=L1,
Vapp=float(voltage), er=er, degree=1)
u2i = Expression('cb', cb=Cbulk * CM3TOUM3 * m, degree=0)
# Defining the mixed function space
CG1 = FiniteElement("CG", mesh1.ufl_cell(), 1)
W_elem = MixedElement([CG1, CG1])
W = FunctionSpace(mesh1, W_elem)
V2 = FunctionSpace(mesh2, 'CG', 1)
# Defining the "Trial" functions
u1 = interpolate(u1i, W) # For time i+1
c1, phi1 = split(u1)
u1_G = interpolate(u1i, W) # For time i+1/2
c1_G, phi1_G = split(u1_G)
u1_n = interpolate(u1i, W) # For time i
c1_n, phi1_n = split(u1_n)
u2 = interpolate(u2i, V2)
u2_n = interpolate(u2i, V2)
u2_G = interpolate(u2i, V2)
# Define the test functions
v1 = TestFunction(W)
(v1c, v1p) = split(v1)
v2 = TestFunction(V2)
du1 = TrialFunction(W)
du2 = TrialFunction(V2)
u1.set_allow_extrapolation(True)
u2.set_allow_extrapolation(True)
u1_G.set_allow_extrapolation(True)
u2_G.set_allow_extrapolation(True)
u1_n.set_allow_extrapolation(True)
u2_n.set_allow_extrapolation(True)
tol = 1E-16
def update_bcs1(bias):
return [DirichletBC(W.sub(1), bias / er, boundaries1, 1)]
bcs1 = [DirichletBC(W.sub(0), Cs * 1E-12, boundaries1, 1),
DirichletBC(W.sub(1), voltage / er, boundaries1, 1)]
bcs2 = None # [DirichletBC(V2,Cbulk*CM3TOUM3,boundaries2,2)]
def get_variational_form1(uc, up, gp1_, gp2_, u2c):
sf2 = segregation_flux(hums, uc, u2c, m)
gc01 = 0.0 # -(mu1*uc*gp1 - sf2)
gc12 = -(mu1 * uc * gp2_ + sf2)
a = -D1ums * inner(grad(uc), grad(v1c)) * dx1
a += gc01 * v1c * ds1(1) + gc12 * v1c * ds1(2)
a -= mu1 * uc * inner(grad(up), grad(v1c)) * dx1
a += mu1 * gp1_ * uc * v1c * ds1(1) + mu1 * gp2_ * uc * v1c * ds1(2)
a -= (inner(grad(up), grad(v1p)) - qee * uc * v1p) * dx1
a += gp1_ * v1p * ds1(1) + gp2_ * v1p * ds1(2)
return a
def get_variational_form2(uc, u1c):
sf2 = segregation_flux(hums, u1c, uc, m)
gc21 = sf2
a = -D2ums * inner(grad(uc), grad(v2)) * dx2
a += gc21 * v2 * ds2(1)
return a
def getTRBDF2ta(uc, up):
r = D1ums * div(grad(uc)) + div(grad(up)) \
+ mu1 * uc * div(grad(up)) + mu1 * inner(grad(up), grad(uc)) \
+ qee * uc
return r
# def segregation_flux(h_, cc1, cc2, m_: Union[float, int] = 1):
# ux1, uc1, _ = get_solution_array1(mesh1, cc1)
# ux2, uc2, _ = get_solution_array1(mesh1, cc2)
# J = h_ * (uc1[-1] - uc2[-1] / m_)
# return J
def segregation_flux(h_, cc1, cc2, m_: Union[float, int] = 1):
J = h_ * (cc1 - cc2 / m_)
return J
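    # Physical note: the flux J > 0 drives transport from layer 1 into
    # layer 2 and vanishes once the interface satisfies cc1 = cc2 / m_.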
def update_potential_bc(uui, bias: float = voltage):
# The total concentration in the oxide (um-2)
# uc,up = ui.split()
# Ctot = assemble(uc*dx)
# The integral in Poisson's equation solution (Nicollian & Brews p.426)
# units: 1/um
# Cint = assemble(uc*Expression('x[0]',degree=1)*dx)
# Get the solution in an array form
uxi, uci, upi = get_solution_array1(mesh1, uui)
# The total concentration in the oxide (cm-2)
Ctot_ = integrate.simps(uci, uxi) * 1E-4
# The integral in Poisson's equation solution (Nicollian & Brews p.426)
# units: 1/cm
Cint_ = integrate.simps(uxi * uci, uxi) * 1E-8
# The centroid of the charge distribution
xbar_ = Cint_ / Ctot_ * 1E4 # um
# The surface charge density at silicon C/cm2
scd_si = -constants.e * (xbar_ / L1) * Ctot_
# The surface charge density at the gate C/cm2
scd_g = -constants.e * (1.0 - xbar_ / L1) * Ctot_
# The electric field at the gate interface
E_g_ = bias / L1 / er + 1E-2 * scd_g / constants.epsilon_0 / er # x
# The electric field at the Si interface
E_si_ = bias / L1 / er - 1E-2 * scd_si / constants.epsilon_0 / er # x
# Since grad(phi) = -E
# s1: <-
# s2: ->
# gp1 = E_g
gp1_ = E_g_
gp2_ = -E_si_
vfb_ = -q_red * Cint_ / (er * e0_red) * 1E-5
return gp1_, gp2_, Cint_, Ctot_, E_g_, E_si_, xbar_, vfb_
hk1 = CellDiameter(mesh1)
GAMMA = 2.0 - np.sqrt(2) # 0.59
TRF = Constant(0.5 * GAMMA)
BDF2_T1 = Constant(1.0 / (GAMMA * (2.0 - GAMMA)))
BDF2_T2 = Constant((1.0 - GAMMA) * (1.0 - GAMMA) / (GAMMA * (2.0 - GAMMA)))
BDF2_T3 = Constant((1.0 - GAMMA) / (2.0 - GAMMA))
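    # TR-BDF2 recap: the first stage advances from t_n to t_n + GAMMA*dt
    # with the trapezoidal rule; the second closes the step with BDF2:
    #   u_G     = u_n + (GAMMA*dt/2) * (F(u_G) + F(u_n))
    #   u_{n+1} = BDF2_T1*u_G - BDF2_T2*u_n + dt*BDF2_T3*F(u_{n+1})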
ffc_options = {"optimize": True,
'cpp_optimize': True,
"quadrature_degree": 5}
newton_solver_parameters = {"nonlinear_solver": "newton",
"newton_solver": {
"linear_solver": "lu",
# "preconditioner": 'ilu', # 'hypre_euclid',
"convergence_criterion": "incremental",
"absolute_tolerance": 1E-5,
"relative_tolerance": 1E-4,
"maximum_iterations": max_iter,
"relaxation_parameter": relaxation_parameter,
# 'krylov_solver': {
# 'absolute_tolerance': 1E-8,
# 'relative_tolerance': 1E-6,
# 'maximum_iterations': 100}
}}
def get_solvers_1(gp1_, gp2_, dt_):
a10 = get_variational_form1(c1_n, phi1_n, gp1_, gp2_, u2_n)
a1G = get_variational_form1(c1_G, phi1_G, gp1_, gp2_, u2_G)
a11 = get_variational_form1(c1, phi1, gp1_, gp2_, u2)
F1G = (1 / dt_) * (c1_G - c1_n) * v1c * dx1 - TRF * (a1G + a10)
F1N = (1 / dt_) * (c1 - BDF2_T1 * c1_G + BDF2_T2 * c1_n) * v1c * dx1 - BDF2_T3 * a11
# SUPG stabilization
b1_ = mu1 * Dx(phi1_n, 0)
nb1 = sqrt(dot(b1_, b1_) + DOLFIN_EPS)
Pek1 = nb1 * hk1 / (2.0 * D1ums)
b2_ = mu1 * Dx(phi1_G, 0)
nb2 = sqrt(dot(b2_, b2_) + DOLFIN_EPS)
Pek2 = nb2 * hk1 / (2.0 * D1ums)
tau1 = conditional(gt(Pek1, DOLFIN_EPS),
(hk1 / (2.0 * nb1)) * (((exp(2.0 * Pek1) + 1.0) / (exp(2.0 * Pek1) - 1.0)) - 1.0 / Pek1),
0.0)
tau2 = conditional(gt(Pek2, DOLFIN_EPS),
(hk1 / (2.0 * nb2)) * (((exp(2.0 * Pek2) + 1.0) / (exp(2.0 * Pek2) - 1.0)) - 1.0 / Pek2),
0.0)
# get the skew symmetric part of the L operator
# LSSNP = dot(vel2,Dx(v2,0))
Lss1 = (mu1 * inner(grad(phi1_G), grad(v1c)) + (mu1 / 2) * div(grad(phi1_G)) * v1c)
Lss2 = (mu1 * inner(grad(phi1), grad(v1c)) + (mu1 / 2) * div(grad(phi1)) * v1c)
# SUPG Stabilization term
ta = getTRBDF2ta(c1_G, phi1_G)
tb = getTRBDF2ta(c1_n, phi1_n)
tc = getTRBDF2ta(c1, phi1)
ra = inner(((1 / dt_) * (c1_G - c1_n) - TRF * (ta + tb)), tau1 * Lss1) * dx1
rb = inner((c1 / dt_ - BDF2_T1 * c1_G / dt_ + BDF2_T2 * c1_n / dt_ - BDF2_T3 * tc), tau2 * Lss2) * dx1
F1G += ra
F1N += rb
J1G = derivative(F1G, u1_G, du1)
J1N = derivative(F1N, u1, du1) # J1G
problem1N = NonlinearVariationalProblem(F1N, u1, bcs1, J1N, form_compiler_parameters=ffc_options)
problem1G = NonlinearVariationalProblem(F1G, u1_G, bcs1, J1G, form_compiler_parameters=ffc_options)
solver1N_ = NonlinearVariationalSolver(problem1N)
solver1N_.parameters.update(newton_solver_parameters)
solver1G_ = NonlinearVariationalSolver(problem1G)
solver1G_.parameters.update(newton_solver_parameters)
return solver1N_, solver1G_
def get_solvers_2(dt_):
a20 = get_variational_form2(u2_n, c1_n)
a2G = get_variational_form2(u2_G, c1_G)
a21 = get_variational_form2(u2, c1)
F2G = (1 / dt_) * (u2_G - u2_n) * v2 * dx2 - TRF * (a2G + a20)
F2N = (1 / dt_) * (u2 - BDF2_T1 * u2_G + BDF2_T2 * u2_n) * v2 * dx2 - BDF2_T3 * a21
J2G = derivative(F2G, u2_G, du2)
J2N = derivative(F2N, u2, du2) # J2G
problem2N = NonlinearVariationalProblem(F2N, u2, bcs2, J2N, form_compiler_parameters=ffc_options)
problem2G = NonlinearVariationalProblem(F2G, u2_G, bcs2, J2G, form_compiler_parameters=ffc_options)
solver2N_ = NonlinearVariationalSolver(problem2N)
solver2N_.parameters.update(newton_solver_parameters)
solver2G_ = NonlinearVariationalSolver(problem2G)
solver2G_.parameters.update(newton_solver_parameters)
return solver2N_, solver2G_
# The time for each concentration profile
# Get tau_c
tauc = utils.tau_c(D=D1cms, E=E, L=L1 * 1E-4, T=tempC)
delta_t = time_s / (N + 1)
if time_s <= 86400 * 4 or int(tauc / delta_t) < 50:
size_n = N + 1
t_sim = np.array([k * dt for k in range(size_n)], dtype=np.float64)
dtt = np.concatenate([np.diff(t_sim), [dt]])
else:
base = 1.5
dt_min = 1
dt_max = dt
num_t = 30
b1 = np.log(dt_min) / np.log(base)
        b2 = np.log(dt_max) / np.log(base)
        # Assumed completion of the truncated branch: build a log-spaced
        # time grid and matching step sizes, mirroring the linear branch.
        t_sim = np.logspace(b1, b2, num=num_t, base=base)
        dtt = np.concatenate([np.diff(t_sim), [dt]])
# -*-coding:utf-8-*-
# Author: SS and WP
# Email: <EMAIL>
import numpy as np
def normalize(v):
norm=np.linalg.norm(v)
if norm==0:
return v
return v/norm
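# Quick check: normalize(np.array([3.0, 4.0])) -> array([0.6, 0.8]).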
# g(x) is wrong because np.max(x, 0) treats the second argument as the
# *axis*, not as an element-wise lower bound; np.maximum(x, 0) is the
# ReLU-style clamp intended here. ggg below is the scalar workaround.
def g(x):
    return np.max(x, 0)
def ggg(x):
if(x>=0.0):
return x
else:
return 0.0
def vectorAngleCos(x,y):
if (len(x) != len(y)):
print('error input,x and y is not in the same space')
return
if np.linalg.norm(x)*np.linalg.norm(y) != 0.0:
cosValue = np.dot(x,y)/(np.linalg.norm(x)*np.linalg.norm(y))
angle = np.arccos(cosValue)
else:
angle = 0.0
return angle
def GeneralEquation(first_x,first_y,second_x,second_y):
# The general equation: Ax+By+C=0
# from http://www.cnblogs.com/DHUtoBUAA/
A=second_y-first_y
B=first_x-second_x
C=second_x*first_y-first_x*second_y
return A,B,C
def GetIntersectPointofLines(p1, p2, p3, p4):
# from http://www.cnblogs.com/DHUtoBUAA/
A1,B1,C1=GeneralEquation(p1[0], p1[1], p2[0], p2[1])
A2,B2,C2 = GeneralEquation(p3[0], p3[1], p4[0], p4[1])
m=A1*B2-A2*B1
if m==0:
print("No intersection point")
return None
else:
x=(C2*B1-C1*B2)/m
y=(C1*A2-C2*A1)/m
#print 'Get intersection point:', [x, y]
return [x, y]
def crossPoint(p1, p2, p3, p4):
# Get coordinates of four points
x1=p1[0]
y1=p1[1]
x2=p2[0]
y2=p2[1]
x3=p3[0]
y3=p3[1]
x4=p4[0]
y4=p4[1]
k1=(y2-y1)*1.0/((x2-x1)*1.0) # Calculate k1 as float
b1=y1*1.0-x1*k1*1.0 #From integer to float
if (x4-x3)==0: #L2: slope k2 does not exist
k2=None
b2=0
else:
k2=(y4-y3)*1.0/(x4-x3) #L2: slope k2 exists
b2=y3*1.0-x3*k2*1.0
if k2==None:
x=x3
else:
x=(b2-b1)*1.0/(k1-k2)
y=k1*x*1.0+b1*1.0
return [x, y]
def lineIntersection(p1, p2, w1, w2, fuzP=0.0, fuzW=0.0): #wall):
#p1 = self.pos
#p2 = other.pos
#w1 = np.array([wall.params[0],wall.params[1]])
#w2 = np.array([wall.params[2],wall.params[3]])
result = None #np.array([0.0,0.0])
if max(p1[0], p2[0])<min(w1[0], w2[0]) or min(p1[0], p2[0])>max(w1[0], w2[0]):
flag = False
return result, flag
if max(p1[1], p2[1])<min(w1[1], w2[1]) or min(p1[1], p2[1])>max(w1[1], w2[1]):
flag = False
return result, flag
#result = np.array([0.0,0.0])
result = GetIntersectPointofLines(p1, p2, w1, w2)
#result = crossPoint(p1, p2, w1, w2)
    if result is None:
flag = False
return result, flag
logic1 = np.dot(result-w1, result-w2)
logic2 = np.dot(result-p1, result-p2)
# flag is True if there is a wall in between.
# otherwise it is false.
flag = True
if logic1>0.0 and min(np.linalg.norm(result-w1), np.linalg.norm(result-w2))>=fuzW:
flag = False
result = None
if logic2>0.0: #and min(np.linalg.norm(result-p1), np.linalg.norm(result-p2))>=fuzP:
flag = False
result = None
return result, flag
def distanceP2L(point, p0, p1):
d = p1-p0
ymp0 = point-p0
ymp1 = point-p1
dist1 = np.linalg.norm(p0-point)
dist2 = np.linalg.norm(p1-point)
    if np.allclose(np.dot(d, d), np.zeros(2)):
        return dist1  # degenerate segment: p0 == p1
    # assumed completion: clamp the projection of the point onto the segment
    t = np.dot(ymp0, d) / np.dot(d, d)
    if t < 0.0:
        return dist1
    if t > 1.0:
        return dist2
    return np.linalg.norm(ymp0 - t * d)
import numpy as np
from scipy.spatial.distance import pdist, squareform
from sklearn.cluster import DBSCAN
from sklearn.utils import check_array
from ML import calculate_rand_score, stock_labels
from utils import get_dataset_steps_positions_velocities_headings
def get_mean_score(name_file):
try:
with open(name_file):
score = np.mean(np.array(np.loadtxt(name_file), dtype=float))
return score
except IOError:
print("Could not open file {0}".format(name_file))
exit()
class ST_DBSCAN:
"""
ST_DBSCAN class for clustering
ref
- ST-DBSCAN: An algorithm for clustering spatial–temporal data
<NAME>, <NAME>
----------
    :param eps1: float, neighborhood threshold applied to the attributes in
        indices_1 (e.g. the temporal / non-spatial attributes)
    :param eps2: float, DBSCAN eps applied to the attributes in indices_2
        (e.g. the spatial attributes)
    :param min_samples: int, the number of samples required for an object to be a core point.
    :param metric_1: string, metric for the indices_1 attributes
    :param metric_2: string, metric for the indices_2 attributes
string default='euclidean', can also be a custom function
The used distance metric - more options are
‘braycurtis’, ‘canberra’, ‘chebyshev’, ‘cityblock’, ‘correlation’,
‘cosine’, ‘dice’, ‘euclidean’, ‘hamming’, ‘jaccard’, ‘jensenshannon’,
‘kulsinski’, ‘mahalanobis’, ‘matching’, ‘rogerstanimoto’, ‘sqeuclidean’,
‘russellrao’, ‘seuclidean’, ‘sokalmichener’, ‘sokalsneath’, ‘yule’.
    :param indices_1: list of column indices of the attributes thresholded by eps1
    :param indices_2: list of column indices of the attributes clustered with eps2
"""
def __init__(self,
eps1,
eps2,
min_samples,
indices_1,
indices_2,
metric_1='euclidean',
metric_2='euclidean',
):
self.eps1 = eps1
self.eps2 = eps2
self.indices_1 = indices_1
self.indices_2 = indices_2
self.min_samples = min_samples
self.metric_1 = metric_1
self.metric_2 = metric_2
self.labels = None
assert self.eps1 > 0, 'eps1 must be positive'
assert self.eps2 > 0, 'eps2 must be positive'
assert type(self.min_samples) == int, 'min_samples must be a positive integer'
assert self.min_samples > 0, 'min_samples must be a positive integer'
def fit(self, X):
# check if input is correct
X = check_array(X)
if not self.eps1 > 0.0 or not self.eps2 > 0.0 or not self.min_samples > 0.0:
raise ValueError('eps1, eps2, minPts must be positive')
        # Compute condensed pairwise distances for each attribute group
        # (pdist returns the condensed, not square, form)
        non_spatial_square_dist_matrix = pdist(X[:, self.indices_1], metric=self.metric_1)
        spatial_square_dist_matrix = pdist(X[:, self.indices_2], metric=self.metric_2)
        # keep the indices_2 distance only where the eps1 condition on the
        # indices_1 attributes holds; otherwise assign an unreachable value
        dist = np.where(non_spatial_square_dist_matrix <= self.eps1, spatial_square_dist_matrix, 10 * self.eps2)
db = DBSCAN(eps=self.eps2,
min_samples=self.min_samples,
metric='precomputed')
db.fit(squareform(dist))
self.labels = db.labels_
def stock_labels_to_directory(self, directory, nb_obs, step_init, step_end):
true_steps = np.arange(step_init, step_end)
steps = np.arange(0, step_end - step_init)
ind_init = steps[0]
# for each step
for (step, step_true) in zip(steps, true_steps):
# get all the observations of step i
ind_to_get = np.arange(ind_init, ind_init + nb_obs)
label_step = self.labels[ind_to_get]
stock_labels(label_step, step_true, repository=directory,
filename="ST_DBSCAN_eps1=" + str(self.eps1) + "eps2="
+ str(self.eps2) + "Nsample="
+ str(self.min_samples) + "label")
ind_init += nb_obs
def generate_results(self, directory, step_init, step_end):
steps = list(np.arange(step_init, step_end)) # steps to take into account in the calculation
filename_true = "ground_truth_label" # file name for ground-truth (see for example file_name
# argument in stock_file function in build_ground_truth function in module ML.py)
filename_pred = "ST_DBSCAN_eps1=" + str(self.eps1) + "eps2=" + \
str(self.eps2) + "Nsample=" + str(self.min_samples) + "label"
score_mean = calculate_rand_score(steps, directory, filename_true, filename_pred)
return score_mean
def split_data(data, n_indiv, time_step):
list_data = list()
    for i in np.arange(0, data.shape[0], n_indiv * time_step):
        list_data.append(data[i:i + n_indiv * time_step])
    return list_data
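# Minimal usage sketch (illustrative, not part of the original module):
# column 0 is the step index (gated by eps1 via indices_1) and columns
# 1-2 are x/y positions (clustered with eps2 via indices_2); the data
# layout and parameter values are assumptions for demonstration only.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    t = np.repeat(np.arange(5), 20)                  # 5 steps x 20 agents
    xy = np.vstack([rng.normal(0.0, 0.1, (50, 2)),   # group near (0, 0)
                    rng.normal(3.0, 0.1, (50, 2))])  # group near (3, 3)
    X = np.column_stack([t, xy])
    st = ST_DBSCAN(eps1=1.0, eps2=0.5, min_samples=5,
                   indices_1=[0], indices_2=[1, 2])
    st.fit(X)
    print(np.unique(st.labels))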
'''
------------------------------------------------------------------------
Functions for created the matrix of ability levels, e. This can
only be used for looking at the 25, 50, 70, 80, 90, 99, and 100th
percentiles, as it uses fitted polynomials to those percentiles.
For a more generic version, see income_nopoly.py.
------------------------------------------------------------------------
'''
import numpy as np
import scipy.optimize as opt
import scipy.interpolate as si
from ogusa import parameter_plots as pp
import os
CUR_PATH = os.path.abspath(os.path.dirname(__file__))
OUTPUT_DIR = os.path.join(CUR_PATH, 'OUTPUT', 'ability')
def arctan_func(xvals, a, b, c):
r'''
This function generates predicted ability levels given data (xvals)
and parameters a, b, and c, from the following arctan function:
.. math::
y = (-a / \pi) * \arctan(b * x + c) + (a / 2)
Args:
xvals (Numpy array): data inputs to arctan function
a (scalar): scale parameter for arctan function
b (scalar): curvature parameter for arctan function
c (scalar): shift parameter for arctan function
    Returns:
yvals (Numpy array): predicted values (output) of arctan
function
'''
yvals = (-a / np.pi) * np.arctan(b * xvals + c) + (a / 2)
return yvals
def arctan_deriv_func(xvals, a, b, c):
r'''
This function generates predicted derivatives of arctan function
given data (xvals) and parameters a, b, and c. The functional form
of the derivative of the function is the following:
.. math::
y = - (a * b) / (\pi * (1 + (b * xvals + c)^2))
Args:
xvals (Numpy array): data inputs to arctan derivative function
a (scalar): scale parameter for arctan function
b (scalar): curvature parameter for arctan function
c (scalar): shift parameter for arctan function
    Returns:
yvals (Numpy array): predicted values (output) of arctan
derivative function
'''
yvals = -(a * b) / (np.pi * (1 + (b * xvals + c) ** 2))
return yvals
def arc_error(abc_vals, params):
'''
This function returns a vector of errors in the three criteria on
which the arctan function is fit to predict extrapolated ability in
ages 81 to 100.::
1) The arctan function value at age 80 must match the estimated
original function value at age 80.
2) The arctan function slope at age 80 must match the estimated
original function slope at age 80.
3) The level of ability at age 100 must be a given fraction
(abil_deprec) below the ability level at age 80.
Args:
abc_vals (tuple): contains (a,b,c)
* a (scalar): scale parameter for arctan function
* b (scalar): curvature parameter for arctan function
* c (scalar): shift parameter for arctan function
params (tuple): contains (first_point, coef1, coef2, coef3,
abil_deprec)
* first_point (scalar): ability level at age 80, > 0
* coef1 (scalar): coefficient in log ability equation on
linear term in age
* coef2 (scalar): coefficient in log ability equation on
quadratic term in age
* coef3 (scalar): coefficient in log ability equation on
cubic term in age
* abil_deprec (scalar): ability depreciation rate between
ages 80 and 100, in (0, 1).
Returns:
error_vec (Numpy array): errors ([error1, error2, error3])
* error1 (scalar): error between ability level at age 80
from original function minus the predicted ability at
age 80 from the arctan function given a, b, and c
* error2 (scalar): error between the slope of the original
function at age 80 minus the slope of the arctan
function at age 80 given a, b, and c
* error3 (scalar): error between the ability level at age
100 predicted by the original model value times
abil_deprec minus the ability predicted by the arctan
function at age 100 given a, b, and c
'''
a, b, c = abc_vals
first_point, coef1, coef2, coef3, abil_deprec = params
error1 = first_point - arctan_func(80, a, b, c)
if (3 * coef3 * 80 ** 2 + 2 * coef2 * 80 + coef1) < 0:
error2 = ((3 * coef3 * 80 ** 2 + 2 * coef2 * 80 + coef1) *
first_point - arctan_deriv_func(80, a, b, c))
else:
error2 = -.02 * first_point - arctan_deriv_func(80, a, b, c)
error3 = abil_deprec * first_point - arctan_func(100, a, b, c)
error_vec = np.array([error1, error2, error3])
return error_vec
def arctan_fit(first_point, coef1, coef2, coef3, abil_deprec,
init_guesses):
'''
This function fits an arctan function to the last 20 years of the
ability levels of a particular ability group to extrapolate
abilities by trying to match the slope in the 80th year and the
ability depreciation rate between years 80 and 100.
Args:
first_point (scalar): ability level at age 80, > 0
coef1 (scalar): coefficient in log ability equation on linear
term in age
coef2 (scalar): coefficient in log ability equation on
quadratic term in age
coef3 (scalar): coefficient in log ability equation on cubic
term in age
abil_deprec (scalar): ability depreciation rate between
ages 80 and 100, in (0, 1)
init_guesses (Numpy array): initial guesses
Returns:
abil_last (Numpy array): extrapolated ability levels for ages
81 to 100, length 20
'''
params = [first_point, coef1, coef2, coef3, abil_deprec]
solution = opt.root(arc_error, init_guesses,
args=params, method='lm')
[a, b, c] = solution.x
old_ages = np.linspace(81, 100, 20)
abil_last = arctan_func(old_ages, a, b, c)
return abil_last
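# Example call (illustrative coefficients only, not estimated values):
#   abil_81_100 = arctan_fit(first_point=0.5, coef1=0.05, coef2=-1e-3,
#                            coef3=5e-6, abil_deprec=0.5,
#                            init_guesses=np.array([0.5, 0.1, -40.0]))
# returns the 20 extrapolated ability levels for ages 81-100.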
def get_e_interp(S, age_wgts, age_wgts_80, abil_wgts, plot=False):
'''
This function takes a source matrix of lifetime earnings profiles
(abilities, emat) of size (80, 7), where 80 is the number of ages
and 7 is the number of ability types in the source matrix, and
interpolates new values of a new S x J sized matrix of abilities
using linear interpolation. [NOTE: For this application, cubic
spline interpolation introduces too much curvature.]
Args:
S (int): number of ages to interpolate. This method assumes that
ages are evenly spaced between the beginning of the 21st
year and the end of the 100th year, >= 3
age_wgts (Numpy array): distribution of population in each age
for the interpolated ages, length S
age_wgts_80 (Numpy array): percent of population in each
one-year age from 21 to 100, length 80
abil_wgts (Numpy array): distribution of population in each
ability group, length J
plot (bool): if True, creates plots of emat_orig and the new
interpolated emat_new
Returns:
emat_new_scaled (Numpy array): interpolated ability matrix scaled
so that population-weighted average is 1, size SxJ
'''
# Get original 80 x 7 ability matrix
abil_wgts_orig = np.array([0.25, 0.25, 0.2, 0.1, 0.1, 0.09, 0.01])
emat_orig = get_e_orig(age_wgts_80, abil_wgts_orig, plot)
# Return emat_orig if S = 80 and abil_wgts = abil_wgts_orig
    if S == 80 and np.array_equal(abil_wgts, abil_wgts_orig):
emat_new_scaled = emat_orig
else:
# generate abil_midp vector
J = abil_wgts.shape[0]
abil_midp = np.zeros(J)
pct_lb = 0.0
for j in range(J):
abil_midp[j] = pct_lb + 0.5 * abil_wgts[j]
pct_lb += abil_wgts[j]
# Make sure that values in abil_midp are within interpolating
# bounds set by the hard coded abil_wgts_orig
if abil_midp.min() < 0.125 or abil_midp.max() > 0.995:
err = ("One or more entries in abils vector is outside the "
+ "allowable bounds.")
raise RuntimeError(err)
emat_j_midp = np.array([0.125, 0.375, 0.600, 0.750, 0.850,
0.945, 0.995])
emat_s_midp = np.linspace(20.5, 99.5, 80)
emat_j_mesh, emat_s_mesh = np.meshgrid(emat_j_midp, emat_s_midp)
newstep = 80 / S
new_s_midp = np.linspace(
20 + 0.5 * newstep, 100 - 0.5 * newstep, S)
new_j_mesh, new_s_mesh = np.meshgrid(abil_midp, new_s_midp)
newcoords = np.hstack((emat_s_mesh.reshape((80*7, 1)),
emat_j_mesh.reshape((80*7, 1))))
emat_new = si.griddata(newcoords, emat_orig.flatten(),
(new_s_mesh, new_j_mesh), method='linear')
emat_new_scaled = emat_new / (emat_new * age_wgts.reshape(S, 1)
* abil_wgts.reshape(1, J)).sum()
if plot:
kwargs = {'filesuffix': '_intrp_scaled'}
pp.plot_income_data(
new_s_midp, abil_midp, abil_wgts, emat_new_scaled,
OUTPUT_DIR, **kwargs)
return emat_new_scaled
def get_e_orig(age_wgts, abil_wgts, plot=False):
r'''
This function generates the 80 x 7 matrix of lifetime earnings
ability profiles, corresponding to annual ages from 21 to 100 and to
paths based on income percentiles 0-25, 25-50, 50-70, 70-80, 80-90,
90-99, 99-100. The ergodic population distribution is an input in
order to rescale the paths so that the weighted average equals 1.
The data come from the following file:
`data/ability/FR_wage_profile_tables.xlsx`
The polynomials are of the form
.. math::
\ln(abil) = \alpha + \beta_{1}\text{age} + \beta_{2}\text{age}^2
+ \beta_{3}\text{age}^3
Values come from regression analysis using IRS CWHS with hours
imputed from the CPS.
Args:
age_wgts (Numpy array): ergodic age distribution, length S
abil_wgts (Numpy array): population weights in each lifetime
earnings group, length J
plot (bool): if True, generates 3D plots of ability paths
Returns:
e_orig_scaled (Numpy array): = lifetime ability profiles scaled
so that population-weighted average is 1, size SxJ
'''
# Return and error if age_wgts is not a vector of size (80,)
if age_wgts.shape[0] != 80:
err = "Vector age_wgts does not have 80 elements."
raise RuntimeError(err)
# Return and error if abil_wgts is not a vector of size (7,)
if abil_wgts.shape[0] != 7:
err = "Vector abil_wgts does not have 7 elements."
raise RuntimeError(err)
# 1) Generate polynomials and use them to get income profiles for
# ages 21 to 80.
one = np.array([-0.09720122, 0.05995294, 0.17654618,
0.21168263, 0.21638731, 0.04500235, 0.09229392])
two = np.array([0.00247639, -0.00004086, -0.00240656, -
0.00306555, -0.00321041, 0.00094253, 0.00012902])
three = np.array([-0.00001842, -0.00000521, 0.00001039,
0.00001438, 0.00001579, -0.00001470, -0.00001169])
const = np.array([3.41e+00, 0.69689692, -0.78761958, -1.11e+00,
-0.93939272, 1.60e+00, 1.89e+00])
ages_short = np.tile(np.linspace(21, 80, 60).reshape((60, 1)),
(1, 7))
log_abil_paths = (const + (one * ages_short) +
(two * (ages_short ** 2)) +
(three * (ages_short ** 3)))
    abil_paths = np.exp(log_abil_paths)
"""
Get the normalized best template to do flux calibration.
"""
#- TODO: refactor algorithmic code into a separate module/function
import argparse
import sys
import numpy as np
from astropy.io import fits
from astropy import units
from astropy.table import Table
from desispec import io
from desispec.fluxcalibration import match_templates,normalize_templates,isStdStar
from desispec.interpolation import resample_flux
from desiutil.log import get_logger
from desispec.parallel import default_nproc
from desispec.io.filters import load_legacy_survey_filter
from desiutil.dust import ext_odonnell
from desispec.fiberbitmasking import get_fiberbitmasked_frame
def parse(options=None):
parser = argparse.ArgumentParser(description="Fit of standard star spectra in frames.")
parser.add_argument('--frames', type = str, default = None, required=True, nargs='*',
help = 'list of path to DESI frame fits files (needs to be same exposure, spectro)')
parser.add_argument('--skymodels', type = str, default = None, required=True, nargs='*',
help = 'list of path to DESI sky model fits files (needs to be same exposure, spectro)')
parser.add_argument('--fiberflats', type = str, default = None, required=True, nargs='*',
help = 'list of path to DESI fiberflats fits files (needs to be same exposure, spectro)')
parser.add_argument('--starmodels', type = str, help = 'path of spectro-photometric stellar spectra fits')
parser.add_argument('-o','--outfile', type = str, help = 'output file for normalized stdstar model flux')
parser.add_argument('--ncpu', type = int, default = default_nproc, required = False, help = 'use ncpu for multiprocessing')
parser.add_argument('--delta-color', type = float, default = 0.2, required = False, help = 'max delta-color for the selection of standard stars (on top of meas. errors)')
parser.add_argument('--color', type = str, default = "G-R", choices=['G-R', 'R-Z'], required = False, help = 'color for selection of standard stars')
parser.add_argument('--z-max', type = float, default = 0.008, required = False, help = 'max peculiar velocity (blue/red)shift range')
parser.add_argument('--z-res', type = float, default = 0.00002, required = False, help = 'dz grid resolution')
parser.add_argument('--template-error', type = float, default = 0.1, required = False, help = 'fractional template error used in chi2 computation (about 0.1 for BOSS b1)')
parser.add_argument('--maxstdstars', type=int, default=30, \
help='Maximum number of stdstars to include')
log = get_logger()
args = None
if options is None:
args = parser.parse_args()
cmd = ' '.join(sys.argv)
else:
args = parser.parse_args(options)
cmd = 'desi_fit_stdstars ' + ' '.join(options)
log.info('RUNNING {}'.format(cmd))
return args
def safe_read_key(header,key) :
value = None
try :
value=header[key]
except KeyError :
value = None
pass
if value is None : # second try
value=header[key.ljust(8).upper()]
return value
def dust_transmission(wave,ebv) :
Rv = 3.1
extinction = ext_odonnell(wave,Rv=Rv)
return 10**(-Rv*extinction*ebv/2.5)
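# Sanity check: ebv = 0 returns a transmission of 1.0 at every wavelength;
# increasing ebv lowers the transmission, more strongly toward the blue.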
def main(args) :
""" finds the best models of all standard stars in the frame
and normlize the model flux. Output is written to a file and will be called for calibration.
"""
log = get_logger()
log.info("mag delta %s = %f (for the pre-selection of stellar models)"%(args.color,args.delta_color))
log.info('multiprocess parallelizing with {} processes'.format(args.ncpu))
# READ DATA
############################################
# First loop through and group by exposure and spectrograph
frames_by_expid = {}
for filename in args.frames :
log.info("reading %s"%filename)
frame=io.read_frame(filename)
expid = safe_read_key(frame.meta,"EXPID")
camera = safe_read_key(frame.meta,"CAMERA").strip().lower()
spec = camera[1]
uniq_key = (expid,spec)
if uniq_key in frames_by_expid.keys():
frames_by_expid[uniq_key][camera] = frame
else:
frames_by_expid[uniq_key] = {camera: frame}
frames={}
flats={}
skies={}
spectrograph=None
starfibers=None
starindices=None
fibermap=None
# For each unique expid,spec pair, get the logical OR of the FIBERSTATUS for all
# cameras and then proceed with extracting the frame information
# once we modify the fibermap FIBERSTATUS
for (expid,spec),camdict in frames_by_expid.items():
fiberstatus = None
for frame in camdict.values():
if fiberstatus is None:
fiberstatus = frame.fibermap['FIBERSTATUS'].data.copy()
else:
fiberstatus |= frame.fibermap['FIBERSTATUS']
for camera,frame in camdict.items():
frame.fibermap['FIBERSTATUS'] |= fiberstatus
# Set fibermask flagged spectra to have 0 flux and variance
frame = get_fiberbitmasked_frame(frame,bitmask='stdstars',ivar_framemask=True)
frame_fibermap = frame.fibermap
frame_starindices = np.where(isStdStar(frame_fibermap))[0]
#- Confirm that all fluxes have entries but trust targeting bits
#- to get basic magnitude range correct
keep = np.ones(len(frame_starindices), dtype=bool)
for colname in ['FLUX_G', 'FLUX_R', 'FLUX_Z']: #- and W1 and W2?
keep &= frame_fibermap[colname][frame_starindices] > 10**((22.5-30)/2.5)
keep &= frame_fibermap[colname][frame_starindices] < 10**((22.5-0)/2.5)
frame_starindices = frame_starindices[keep]
if spectrograph is None :
spectrograph = frame.spectrograph
fibermap = frame_fibermap
starindices=frame_starindices
starfibers=fibermap["FIBER"][starindices]
elif spectrograph != frame.spectrograph :
log.error("incompatible spectrographs %d != %d"%(spectrograph,frame.spectrograph))
raise ValueError("incompatible spectrographs %d != %d"%(spectrograph,frame.spectrograph))
elif starindices.size != frame_starindices.size or np.sum(starindices!=frame_starindices)>0 :
log.error("incompatible fibermap")
raise ValueError("incompatible fibermap")
if not camera in frames :
frames[camera]=[]
frames[camera].append(frame)
# possibly cleanup memory
del frames_by_expid
for filename in args.skymodels :
log.info("reading %s"%filename)
sky=io.read_sky(filename)
camera=safe_read_key(sky.header,"CAMERA").strip().lower()
if not camera in skies :
skies[camera]=[]
skies[camera].append(sky)
for filename in args.fiberflats :
log.info("reading %s"%filename)
flat=io.read_fiberflat(filename)
camera=safe_read_key(flat.header,"CAMERA").strip().lower()
# NEED TO ADD MORE CHECKS
if camera in flats:
log.warning("cannot handle several flats of same camera (%s), will use only the first one"%camera)
#raise ValueError("cannot handle several flats of same camera (%s)"%camera)
else :
flats[camera]=flat
if starindices.size == 0 :
log.error("no STD star found in fibermap")
raise ValueError("no STD star found in fibermap")
log.info("found %d STD stars"%starindices.size)
log.warning("Not using flux errors for Standard Star fits!")
# DIVIDE FLAT AND SUBTRACT SKY , TRIM DATA
############################################
# since poping dict, we need to copy keys to iterate over to avoid
# RuntimeError due to changing dict
frame_cams = list(frames.keys())
for cam in frame_cams:
if not cam in skies:
log.warning("Missing sky for %s"%cam)
frames.pop(cam)
continue
if not cam in flats:
log.warning("Missing flat for %s"%cam)
frames.pop(cam)
continue
flat=flats[cam]
for frame,sky in zip(frames[cam],skies[cam]) :
frame.flux = frame.flux[starindices]
frame.ivar = frame.ivar[starindices]
frame.ivar *= (frame.mask[starindices] == 0)
frame.ivar *= (sky.ivar[starindices] != 0)
frame.ivar *= (sky.mask[starindices] == 0)
frame.ivar *= (flat.ivar[starindices] != 0)
frame.ivar *= (flat.mask[starindices] == 0)
frame.flux *= ( frame.ivar > 0) # just for clean plots
for star in range(frame.flux.shape[0]) :
ok=np.where((frame.ivar[star]>0)&(flat.fiberflat[star]!=0))[0]
if ok.size > 0 :
frame.flux[star] = frame.flux[star]/flat.fiberflat[star] - sky.flux[star]
frame.resolution_data = frame.resolution_data[starindices]
# CHECK S/N
############################################
# for each band in 'brz', record quadratic sum of median S/N across wavelength
snr=dict()
for band in ['b','r','z'] :
        snr[band] = np.zeros(starindices.size)
# coding: utf-8
from typing import Tuple
import math
import os
from datetime import datetime
from os.path import abspath, join
import re
import numpy as np
from scipy import ndimage
from ncmagics import readnc, japanmap
def d_from_filterd_min(prmsl: np.ndarray, lat: np.ndarray, lon: np.ndarray):
# minimum value filter
filterd_prmsl = np.where(
ndimage.filters.minimum_filter(
prmsl, size=(18, 12), mode=('nearest', 'wrap')
) == prmsl
)
    # spherical trigonometry
dx_s = np.array([
np.deg2rad(lon[filterd_prmsl[1]] - lo)
for lo in lon
]) # (141, 14)
y0_s = np.deg2rad(lat) # (81)
y1_s = np.deg2rad(lat[filterd_prmsl[0]]) # (14)
cos_d_part1 = np.array([
np.sin(y0) * np.sin(y1_s)
for y0 in y0_s
])
    cos_d_part2_ = np.array([
        np.cos(y0) * np.cos(y1_s)
        for y0 in y0_s
    ])  # (81, 14)
import itertools
import numpy as np
import sys
from collections import defaultdict, namedtuple
#Credit:
#https://github.com/dennybritz/reinforcement-learning
EpisodeStats = namedtuple("Stats",["episode_lengths", "episode_rewards"])
def make_epsilon_greedy_policy(Q, epsilon, decay, nA):
"""
Creates an epsilon-greedy policy based on a given Q-function and epsilon.
Args:
Q: A dictionary that maps from state -> action-values.
Each value is a numpy array of length nA (see below)
        epsilon: The base probability of selecting a random action, a float between 0 and 1.
        decay: Multiplicative decay applied to epsilon each episode.
        nA: Number of actions in the environment.
    Returns:
        A function that takes an observation and the episode index and returns
        the probabilities for each action in the form of a numpy array of length nA.
"""
def policy_fn(observation, episode):
e_prime = epsilon * decay ** episode
A = np.ones(nA, dtype=float) * e_prime / nA
if np.all(np.isclose(Q[observation], np.zeros(nA))):
            best_action = np.random.randint(nA)
        else:
            best_action = np.argmax(Q[observation])
        A[best_action] += (1.0 - e_prime)
        return A
    return policy_fn
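# Minimal usage sketch (the state name and 4-action space are assumed
# here purely for illustration):
if __name__ == "__main__":
    Q = defaultdict(lambda: np.zeros(4))
    Q["s0"] = np.array([0.0, 1.0, 0.0, 0.0])
    policy = make_epsilon_greedy_policy(Q, epsilon=0.1, decay=0.99, nA=4)
    probs = policy("s0", episode=10)        # mass concentrated on action 1
    action = np.random.choice(4, p=probs)
    print(probs, action)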
import sqlite3
from tqdm import tqdm
import numpy as np
import array
import sys
import math
import os
import multiprocessing
import shutil
import pandas as pd
from scipy.signal import savgol_filter
class Reload:
def __init__(self, path_pri, path_tra, fold):
self.path_pri = path_pri
self.path_tra = path_tra
self.fold = fold
def sqlite_read(self, path):
"""
python读取sqlite数据库文件
"""
        mydb = sqlite3.connect(path)  # connect to the database
mydb.text_factory = lambda x: str(x, 'gbk', 'ignore')
        cur = mydb.cursor()  # create cursor `cur` to execute SQL statements
# 获取表名
cur.execute("SELECT name FROM sqlite_master WHERE type='table'")
        Tables = cur.fetchall()  # Tables is a list of tuples
        # get all the information of the table structure
if path[-5:] == 'pridb':
cur.execute("SELECT * FROM {}".format(Tables[3][0]))
res = cur.fetchall()[-2][1]
elif path[-5:] == 'tradb':
cur.execute("SELECT * FROM {}".format(Tables[1][0]))
res = cur.fetchall()[-3][1]
return int(res)
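    # Usage note (illustrative paths):
    #   n_hits = Reload('a.pridb', 'a.tradb', '.').sqlite_read('a.pridb')
    # returns the record count stored in the database metadata.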
def read_with_time(self, time):
conn_pri = sqlite3.connect(self.path_pri)
result_pri = conn_pri.execute(
"Select SetID, Time, Chan, Thr, Amp, RiseT, Dur, Eny, RMS, Counts, TRAI FROM view_ae_data")
chan_1, chan_2, chan_3, chan_4 = [], [], [], []
t = [[] for _ in range(len(time) - 1)]
N_pri = self.sqlite_read(self.path_pri)
for _ in tqdm(range(N_pri)):
i = result_pri.fetchone()
if i[-2] is not None and i[-2] >= 6 and i[-1] > 0:
for idx, chan in zip(np.arange(1, 5), [chan_1, chan_2, chan_3, chan_4]):
if i[2] == idx:
chan.append(i)
for j in range(len(t)):
if time[j] <= i[1] < time[j + 1]:
t[j].append(i)
break
break
chan_1 = np.array(chan_1)
chan_2 = np.array(chan_2)
chan_3 = np.array(chan_3)
chan_4 = np.array(chan_4)
return t, chan_1, chan_2, chan_3, chan_4
def read_vallen_data(self, lower=2, t_cut=float('inf'), mode='all'):
data_tra, data_pri, chan_1, chan_2, chan_3, chan_4 = [], [], [], [], [], []
if mode == 'all' or mode == 'tra only':
conn_tra = sqlite3.connect(self.path_tra)
result_tra = conn_tra.execute(
"Select Time, Chan, Thr, SampleRate, Samples, TR_mV, Data, TRAI FROM view_tr_data")
N_tra = self.sqlite_read(self.path_tra)
for _ in tqdm(range(N_tra), ncols=80):
i = result_tra.fetchone()
if i[0] > t_cut:
continue
data_tra.append(i)
if mode == 'all' or mode == 'pri only':
conn_pri = sqlite3.connect(self.path_pri)
result_pri = conn_pri.execute(
"Select SetID, Time, Chan, Thr, Amp, RiseT, Dur, Eny, RMS, Counts, TRAI FROM view_ae_data")
N_pri = self.sqlite_read(self.path_pri)
for _ in tqdm(range(N_pri), ncols=80):
i = result_pri.fetchone()
if i[0] > t_cut:
continue
if i[-2] is not None and i[-2] > lower and i[-1] > 0:
data_pri.append(i)
if i[2] == 1:
chan_1.append(i)
                    elif i[2] == 2:
chan_2.append(i)
elif i[2] == 3:
chan_3.append(i)
elif i[2] == 4:
chan_4.append(i)
data_tra = sorted(data_tra, key=lambda x: x[-1])
data_pri = np.array(data_pri)
chan_1 = np.array(chan_1)
chan_2 = np.array(chan_2)
chan_3 = np.array(chan_3)
chan_4 = np.array(chan_4)
return data_tra, data_pri, chan_1, chan_2, chan_3, chan_4
def read_pac_data(self, path, lower=2):
os.chdir(path)
dir_features = os.listdir(path)[0]
data_tra, data_pri, chan_1, chan_2, chan_3, chan_4 = [], [], [], [], [], []
        with open(dir_features, 'r') as f:
            # Parse the header-stripped feature lines. The comma-separated numeric
            # layout is an assumption: the original draft looped over sqlite cursors
            # (result_tra/N_pri) that are undefined in this method.
            rows = [np.array(line.strip("\n").split(','), dtype=float)
                    for line in f.readlines()[1:]]
        for i in tqdm(rows, ncols=80):
            if i[-2] is not None and i[-2] > lower and i[-1] > 0:
                data_pri.append(i)
                if i[2] == 1:
                    chan_1.append(i)
                elif i[2] == 2:
                    chan_2.append(i)
                elif i[2] == 3:
                    chan_3.append(i)
                elif i[2] == 4:
                    chan_4.append(i)
data_tra = sorted(data_tra, key=lambda x: x[-1])
data_pri = np.array(data_pri)
chan_1 = np.array(chan_1)
        chan_2 = np.array(chan_2)
        chan_3 = np.array(chan_3)
        chan_4 = np.array(chan_4)
        return data_tra, data_pri, chan_1, chan_2, chan_3, chan_4
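# Hedged usage sketch: the database paths and fold label below are placeholders,
# not from the source.
if __name__ == "__main__":
    loader = Reload('data.pridb', 'data.tradb', fold='run1')
    data_tra, data_pri, c1, c2, c3, c4 = loader.read_vallen_data(lower=2, mode='all')
    print(len(data_tra), data_pri.shape)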
# Anharmonic correction to vibrational frequencies
# Version 1.1 - 16/07/2020
# The file anharm_path.txt must be present in the root folder (the
# one containing the program). The content of anharm_path.txt is the name
# of the folder containing the data (usually, the folder relative to
# the phase to be investigated). Such name is assigned to the abs_path
# variable
# Input file: input_anharm.txt (under the abs_path folder)
# Structure of the input (input_anharm.txt):
#
# 1) folder name where SCAN data from CRYSTAL are stored
# 2) output file name (it will be written in the folder
# specified at line 1)
# 3) minimum, maximum temperatures and number of points
# where the anharmonic Helmholtz function will be
# computed
# 4) order of the polynomial used to fit the Helmholtz
# free energy as a function of V and T. The unit
# of the computed free energy is the hartree.
#
# The output file contains the power of the fitting polynomial
# together with the optimized coefficients to reconstruct the
# Helmholtz free energy as a function of V and T in the specified
# ranges. Volume ranges are from the input files found in the
# specified folder.
# Files required to be found in the specified folder:
# 1) volumes.dat: it contains the volumes at which the SCANMODE's
#                  were done together with the harmonic frequencies
# computed by CRYSTAL.
# If not both 0., the last two columns, specifies
# the minimum and maximum q to select.
# Volumes of the primitive cell in cubic A;
# frequencies in cm^-1.
# 2) vect.dat: eigenvectors of the normal mode: one column for
#              each volume, in the same order as specified in
#              the volumes.dat file
# 3) input.txt: it contains the names of the files where the Q
# energies from the SCANMODE's are stored, as
# they are copied and pasted from the CRYSTAL
# output
# 4) files whose names are stored in the input.txt file.
# NOTE: in order to be used with the BM3_thermal_2 program,
# fits from more than one normal mode must be of the same order
# All the output files produced here must be copied in the relevant
# input folder specified for the BM3_thermal_2.
# The Anharmonic correction in BM3_thermal_2 program is activated
# by the ANH keyword in the input file for that program.
# Usage:
# At the simplest level, just use the helm_fit() function to read
# all the input and to make the relevant fits.
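#
# Illustrative example of input_anharm.txt (assumed layout following the four
# numbered lines described above; the folder, file name and values are made up):
#
#   pyrope_scan/
#   anharm_fit.out
#   10. 500. 50
#   4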
# from IPython import get_ipython
# get_ipython().magic('clear')
# get_ipython().magic('reset -sf')
import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.optimize import curve_fit
class anh_class():
pass
class data_class():
def __init__(self,dim):
self.dim=dim
self.nlev=int(self.dim/2)
class data_flag():
def __init__(self):
self.comp=np.array([],dtype=bool)
self.setup=False
def load_files():
'''
Loads data files and file names of the SCAN data
'''
data=np.loadtxt(path+'volumes.dat')
volumes=data[:,0]
h_freq=data[:,1]
qmn=data[:,2]
qmx=data[:,3]
nvol=volumes.size
scan_name=np.loadtxt(path+"input.txt", dtype=str)
mode_vect=np.loadtxt(path+"vect.dat", dtype=float)
glob.data=data
glob.volumes=volumes
glob.h_freq=h_freq
glob.nvol=nvol
glob.scan_name=scan_name
glob.mode_vect=mode_vect
glob.qmn=qmn
glob.qmx=qmx
prn_vol=str(volumes)
print("Number of data SCAN's: %3i:" % nvol)
print("Volumes: %s" % prn_vol)
def set_up():
for i in np.arange(glob.nvol):
qmn=glob.qmn[i]
qmx=glob.qmx[i]
anh[i]=anh_class()
anh[i].name=glob.scan_name[i]
anh[i].vol=glob.volumes[i]
anh[i].h_freq=glob.h_freq[i]
energy_data=np.loadtxt(path+glob.scan_name[i])
anh[i].q=energy_data[:,0].astype(float)
anh[i].q_orig=np.copy(anh[i].q)
energy=energy_data[:,1].astype(float)
min_e=np.min(energy)
anh[i].e=energy-min_e
if (qmn != 0.) or (qmx != 0.):
test=((anh[i].q >= qmn) & (anh[i].q <= qmx))
anh[i].q = anh[i].q[test]
anh[i].e = anh[i].e[test]
anh[i].vector=glob.mode_vect[:,i]
fh_crys=anh[i].h_freq*csl
anh[i].omega=2*np.pi*fh_crys
anh[i].qmax=np.sqrt(sum(anh[i].vector**2))
anh[i].q2max=(anh[i].qmax**2)*(bohr**2)
        anh[i].red=ht/(anh[i].omega*anh[i].q2max)
anh[i].q=anh[i].q*anh[i].qmax
flag.comp=np.append(flag.comp, False)
flag.setup=True
def energy_func(qq, a, b, c, d):
return a+b*qq**2+c*qq**3+d*qq**4
def energy_quad(qq, a, b):
return a+b*qq**2
def start_fit(iv, npt=40):
q=anh[iv].q
e=anh[iv].e
fit_par,_ =curve_fit(energy_func,q,e)
fit_quad,_ =curve_fit(energy_quad,q,e)
anh[iv].par=fit_par
min_q=np.min(q)
max_q=np.max(q)
q_list=np.linspace(min_q,max_q,npt)
e4_list=np.array([])
e2_list=np.array([])
for iq in q_list:
ieq4=energy_func(iq,*anh[iv].par)
ieq2=energy_quad(iq,*fit_quad)
e4_list=np.append(e4_list,ieq4)
e2_list=np.append(e2_list,ieq2)
plt.figure()
plt.plot(q_list,e4_list,"-",label='Quartic fit')
plt.plot(q_list,e2_list,"--",label='Quadratic fit')
plt.plot(anh[iv].q,anh[iv].e,"*",label='Actual values')
plt.xlabel("Q")
plt.ylabel("E")
plt.legend(frameon=True)
plt.show()
anh[iv].ko=2*anh[iv].par[1]*conv/(bohr**2)
lam=anh[iv].par[3]
d3l=anh[iv].par[2]
anh[iv].zero_l=anh[iv].par[0]
anh[iv].om=np.sqrt(anh[iv].ko/anh[iv].red)
anh[iv].nu=anh[iv].om/(2*np.pi*csl)
    anh[iv].lam=lam*conv/(bohr**4)
    anh[iv].d3l=d3l*conv/(bohr**3)
    anh[iv].fact=(ht/(2*anh[iv].red*anh[iv].om))**2
    anh[iv].factd=(ht/(2*anh[iv].red*anh[iv].om))**(3/2)
    anh[iv].fact_1=anh[iv].lam*anh[iv].fact
    anh[iv].factd_1=iun*anh[iv].factd*anh[iv].d3l
    anh[iv].h_omeg=ht*anh[iv].om
def diag_n(iv, n):
    dn=(anh[iv].fact_1*6*(n**2+n+1/2))+(anh[iv].h_omeg*(n+1/2))
    return dn
def extra_1(iv, n):
    ext1=-3*anh[iv].factd_1*(n+1)*(np.sqrt(n+1))
    return ext1
def extra_2(iv, n):
    ext2=-2*anh[iv].fact_1*(3+2*n)*(np.sqrt((n+2)*(n+1)))
    return ext2
def extra_3(iv, n):
    ext3=anh[iv].factd_1*np.sqrt((n+3)*(n+2)*(n+1))
    return ext3
def extra_4(iv, n):
    ext4=anh[iv].fact_1*np.sqrt((n+4)*(n+3)*(n+2)*(n+1))
    return ext4
def H_matrix(iv):
ind=np.arange(glob.dim)
H=np.zeros((glob.dim,glob.dim),dtype=complex)
for ii in ind:
for jj in ind:
if ii==jj:
H[jj][ii]=diag_n(iv, ii)
elif jj==ii+2:
H[jj][ii]=extra_2(iv, ii)
elif jj==ii-2:
H[jj][ii]=extra_2(iv, jj)
elif jj==ii+4:
H[jj][ii]=extra_4(iv, ii)
elif jj==ii-4:
H[jj][ii]=extra_4(iv, jj)
elif jj==ii+1:
H[jj][ii]=extra_1(iv, ii)
elif jj==ii-1:
H[jj][ii]=-1*extra_1(iv, jj)
elif jj==ii+3:
H[jj][ii]=extra_3(iv, ii)
elif jj==ii-3:
H[jj][ii]=-1*extra_3(iv, jj)
return H
def energy_anh(iv):
H_mat=H_matrix(iv)
vals=np.linalg.eigvals(H_mat)
vals=np.real(vals)
anh[iv].vals=np.sort(vals)
anh[iv].e_zero=anh[iv].zero_l+anh[iv].vals/conv
def partition(iv, temp, nl=10):
"""
Computes the partition function by direct summation of the
exponential terms. By default, the number of the energy levels
involved in the summation is in the variable glob.nlev, whose
value is 1/2 of the dimension of the Hamiltonian matrix.
Args:
        iv: volume index (according to the list of volumes specified
            in the volumes.dat file)
temp: temperature (K)
nl: number of energy levels considered in the summation
(default: 10)
"""
lev_list=np.arange(nl)
z=0.
for i in lev_list:
z=z+np.exp(-1*anh[iv].vals[i]/(k*temp))
return z
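# Standalone worked check of the direct summation above for a hypothetical
# two-level system (kB in J/K; the level energies in J are made-up values):
def _partition_two_level_check(T=300.0):
    kB = 1.380649e-23
    E = np.array([0.0, 2.0e-21])
    return np.sum(np.exp(-E / (kB * T)))   # ~1.62 at 300 K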
def helm(iv, temp):
"""
Computes the Helmholtz free energy (in hartree)
Args:
iv: volume index (according to the list of volumes specified
in the volumes.dat file)
temp: temperature (K)
"""
z=partition(iv, temp, nl=glob.nlev)
return -1*k*temp*np.log(z)/conv
def check_partition(iv, temp, from_plot=False):
"""
Checks convergence of the partition function at a given
temperature
Args:
iv: volume index (according to the list of volumes specified
in the volumes.dat file)
        temp: temperature (K)
"""
tol_der=0.005
min_lev=5
max_lev=glob.nlev
lev_list=np.arange(min_lev,max_lev)
z_list=np.array([])
for il in lev_list:
iz=partition(iv,temp,il)
z_list=np.append(z_list,iz)
der_z=np.gradient(z_list)
tlt="Partition function: convergence test for T = " + str(temp) + " K"
plt.figure()
plt.plot(lev_list, z_list)
plt.title(tlt)
plt.xlabel('Number of vibrational levels')
plt.ylabel('Partition function')
plt.show()
test=(der_z >= tol_der)
st=sum(test)+min_lev
print("Threshold for convergence (on the variation of Z): %4.4f" % tol_der)
if (st < glob.nlev):
print("Convergence reached at the %3i level" % st)
else:
print("Warning: convergence never reached")
eth=anh[iv].e_zero[st]
test_scan=(eth-anh[iv].e) >= 0.
zero_scan=True
scan_sum=sum(test_scan)
if scan_sum == 0:
zero_scan=False
if zero_scan:
min_q=0.
max_q=0.
q_test=anh[iv].q[test_scan]
        min_q=np.min(q_test)
        max_q=np.max(q_test)
import numpy as np
import math
from cereal import log
from common.numpy_fast import interp
from common.params import Params
from common.realtime import sec_since_boot
from selfdrive.config import Conversions as CV
from selfdrive.controls.lib.lane_planner import TRAJECTORY_SIZE
from selfdrive.controls.lib.drive_helpers import V_CRUISE_MAX
_MIN_V = 5.6 # Do not operate under 20km/h
_ENTERING_PRED_LAT_ACC_TH = 1.3 # Predicted Lat Acc threshold to trigger entering turn state.
_ABORT_ENTERING_PRED_LAT_ACC_TH = 1.1 # Predicted Lat Acc threshold to abort entering state if speed drops.
_TURNING_LAT_ACC_TH = 1.6 # Lat Acc threshold to trigger turning turn state.
_LEAVING_LAT_ACC_TH = 1.3 # Lat Acc threshold to trigger leaving turn state.
_FINISH_LAT_ACC_TH = 1.1 # Lat Acc threshold to trigger end of turn cycle.
_EVAL_STEP = 5. # mts. Resolution of the curvature evaluation.
_EVAL_START = 20. # mts. Distance ahead where to start evaluating vision curvature.
_EVAL_LENGTH = 150.  # mts. Distance ahead where to stop evaluating vision curvature.
_EVAL_RANGE = np.arange(_EVAL_START, _EVAL_LENGTH, _EVAL_STEP)
_A_LAT_REG_MAX = 2. # Maximum lateral acceleration
_NO_OVERSHOOT_TIME_HORIZON = 4. # s. Time to use for velocity desired based on a_target when not overshooting.
# Lookup table for the minimum smooth deceleration during the ENTERING state
# depending on the actual maximum absolute lateral acceleration predicted on the turn ahead.
_ENTERING_SMOOTH_DECEL_V = [-0.2, -1.] # min decel value allowed on ENTERING state
_ENTERING_SMOOTH_DECEL_BP = [1.3, 3.] # absolute value of lat acc ahead
# Lookup table for the acceleration for the TURNING state
# depending on the current lateral acceleration of the vehicle.
_TURNING_ACC_V = [0.5, 0., -0.4] # acc value
_TURNING_ACC_BP = [1.5, 2.3, 3.] # absolute value of current lat acc
_LEAVING_ACC = 0.5  # Comfortable acceleration to regain speed while leaving a turn.
_MIN_LANE_PROB = 0.6 # Minimum lanes probability to allow curvature prediction based on lanes.
_DEBUG = False
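# Hedged illustration of how the lookup tables above are read with `interp`
# (piecewise-linear interpolation): a current lateral acceleration of
# 1.9 m/s^2, halfway between the first two breakpoints, maps to
#   interp(1.9, _TURNING_ACC_BP, _TURNING_ACC_V)  ->  0.25 m/s^2
# of allowed acceleration during the TURNING state.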
def _debug(msg):
if not _DEBUG:
return
print(msg)
VisionTurnControllerState = log.LongitudinalPlan.VisionTurnControllerState
def eval_curvature(poly, x_vals):
"""
This function returns a vector with the curvature based on path defined by `poly`
evaluated on distance vector `x_vals`
"""
    # https://en.wikipedia.org/wiki/Curvature#Local_expressions
def curvature(x):
a = abs(2 * poly[1] + 6 * poly[0] * x) / (1 + (3 * poly[0] * x**2 + 2 * poly[1] * x + poly[2])**2)**(1.5)
return a
return np.vectorize(curvature)(x_vals)
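# Illustrative sanity check (not from the source): for poly = [a, b, c, d]
# describing y = a*x^3 + b*x^2 + c*x + d, a straight line (a = b = 0) has zero
# curvature everywhere, and at x = 0 the expression reduces to |2b|/(1 + c^2)^1.5:
#   eval_curvature(np.array([0., 0., 1., 0.]), np.array([0., 10.]))  ->  [0., 0.]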
def eval_lat_acc(v_ego, x_curv):
"""
This function returns a vector with the lateral acceleration based
for the provided speed `v_ego` evaluated over curvature vector `x_curv`
"""
def lat_acc(curv):
a = v_ego**2 * curv
return a
return np.vectorize(lat_acc)(x_curv)
def _description_for_state(turn_controller_state):
if turn_controller_state == VisionTurnControllerState.disabled:
return 'DISABLED'
if turn_controller_state == VisionTurnControllerState.entering:
return 'ENTERING'
if turn_controller_state == VisionTurnControllerState.turning:
return 'TURNING'
if turn_controller_state == VisionTurnControllerState.leaving:
return 'LEAVING'
class VisionTurnController():
def __init__(self, CP):
self._params = Params()
self._CP = CP
self._op_enabled = False
self._gas_pressed = False
self._is_enabled = self._params.get_bool("TurnVisionControl")
self._last_params_update = 0.
self._v_cruise_setpoint = 0.
self._v_ego = 0.
self._a_ego = 0.
self._a_target = 0.
self._v_overshoot = 0.
self._state = VisionTurnControllerState.disabled
self._reset()
@property
def state(self):
return self._state
@state.setter
def state(self, value):
if value != self._state:
_debug(f'TVC: TurnVisionController state: {_description_for_state(value)}')
if value == VisionTurnControllerState.disabled:
self._reset()
self._state = value
@property
def a_target(self):
return self._a_target if self.is_active else self._a_ego
@property
def v_turn(self):
if not self.is_active:
return self._v_cruise_setpoint
return self._v_overshoot if self._lat_acc_overshoot_ahead \
else self._v_ego + self._a_target * _NO_OVERSHOOT_TIME_HORIZON
@property
def is_active(self):
return self._state != VisionTurnControllerState.disabled
def _reset(self):
self._current_lat_acc = 0.
self._max_v_for_current_curvature = 0.
self._max_pred_lat_acc = 0.
self._v_overshoot_distance = 200.
self._lat_acc_overshoot_ahead = False
def _update_params(self):
time = sec_since_boot()
if time > self._last_params_update + 5.0:
self._is_enabled = self._params.get_bool("TurnVisionControl")
self._last_params_update = time
def _update_calculations(self, sm):
        # Get path polynomial approximation for curvature estimation from model data.
path_poly = None
model_data = sm['modelV2'] if sm.valid.get('modelV2', False) else None
lat_planner_data = sm['lateralPlan'] if sm.valid.get('lateralPlan', False) else None
        # 1. When the probability of lanes is good enough, compute the polynomial from lanes, as they are
        # way more stable in the current mode than the driving path.
if model_data is not None and len(model_data.laneLines) == 4 and len(model_data.laneLines[0].t) == TRAJECTORY_SIZE:
ll_x = model_data.laneLines[1].x # left and right ll x is the same
lll_y = np.array(model_data.laneLines[1].y)
rll_y = np.array(model_data.laneLines[2].y)
l_prob = model_data.laneLineProbs[1]
r_prob = model_data.laneLineProbs[2]
lll_std = model_data.laneLineStds[1]
rll_std = model_data.laneLineStds[2]
# Reduce reliance on lanelines that are too far apart or will be in a few seconds
width_pts = rll_y - lll_y
prob_mods = []
for t_check in [0.0, 1.5, 3.0]:
width_at_t = interp(t_check * (self._v_ego + 7), ll_x, width_pts)
prob_mods.append(interp(width_at_t, [4.0, 5.0], [1.0, 0.0]))
mod = min(prob_mods)
l_prob *= mod
r_prob *= mod
# Reduce reliance on uncertain lanelines
l_std_mod = interp(lll_std, [.15, .3], [1.0, 0.0])
r_std_mod = interp(rll_std, [.15, .3], [1.0, 0.0])
l_prob *= l_std_mod
r_prob *= r_std_mod
# Find path from lanes as the average center lane only if min probability on both lanes is above threshold.
if l_prob > _MIN_LANE_PROB and r_prob > _MIN_LANE_PROB:
c_y = width_pts / 2 + lll_y
path_poly = np.polyfit(ll_x, c_y, 3)
# 2. If not polynomial derived from lanes, then derive it from compensated driving path with lanes as
# provided by `lateralPlanner`.
if path_poly is None and lat_planner_data is not None and len(lat_planner_data.dPathWLinesX) > 0 \
and lat_planner_data.dPathWLinesX[0] > 0:
            path_poly = np.polyfit(lat_planner_data.dPathWLinesX, lat_planner_data.dPathWLinesY, 3)
import argparse
import torch
import numpy as np
from scipy.stats import sem, spearmanr
from pandas import read_csv
from pathlib import Path
import sys
file = Path(__file__).resolve()
package_root_directory = file.parents[1]
sys.path.append(str(package_root_directory))
from DataLoader.dataset import Dataset
from DataLoader.collate import custom_collate
from Utils.transformation import Transformation
from Utils.record import record
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
AutoMinorLocator)
import matplotlib as mpl
mpl.rcParams['mathtext.fontset'] = 'cm'
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
cm = plt.get_cmap('Set1')
cm2 = plt.get_cmap('Set2')
parser = argparse.ArgumentParser('Predict longitudinal all times')
parser.add_argument('--job_id', type=int)
parser.add_argument('--epoch', type=int)
args = parser.parse_args()
device = 'cpu'
N = 29
dt = 0.5
length = 50
pop_avg = np.load('../Data/Population_averages.npy')
pop_avg_env = np.load('../Data/Population_averages_env.npy')
pop_std = np.load('../Data/Population_std.npy')
pop_avg_ = torch.from_numpy(pop_avg[...,1:]).float()
pop_avg_env = torch.from_numpy(pop_avg_env).float()
pop_std = torch.from_numpy(pop_std[...,1:]).float()
pop_avg_bins = np.arange(40, 105, 3)[:-2]
missing = [[[] for y in range(10)] for i in range(N)]
notmissing = [[[] for y in range(10)] for i in range(N)]
linear_notmissing = [[[] for y in range(10)] for i in range(N)]
exact_missing = [[[] for y in range(10)] for i in range(N)]
exact_notmissing = [[[] for y in range(10)] for i in range(N)]
weights_notmissing = [[[] for y in range(10)] for i in range(N)]
weights_missing = [[[] for y in range(10)] for i in range(N)]
first_notmissing = [[[] for y in range(10)] for i in range(N)]
pop_missing = [[[] for y in range(10)] for i in range(N)]
pop_notmissing = [[[] for y in range(10)] for i in range(N)]
test_name = '../Data/test.csv'
test_set = Dataset(test_name, N, pop=False, min_count = 10)
num_test = test_set.__len__()
test_generator = torch.utils.data.DataLoader(test_set, batch_size = num_test, shuffle = False, collate_fn = lambda x: custom_collate(x, pop_avg_, pop_avg_env, pop_std, 1.0))
mean_deficits = read_csv('../Data/mean_deficits.txt', index_col=0,sep=',',header=None, names = ['variable']).values[1:].flatten()
std_deficits = read_csv('../Data/std_deficits.txt', index_col=0,sep=',',header=None, names = ['variable']).values[1:].flatten()
psi = Transformation(mean_deficits[:-3], std_deficits[:-3], [6, 7, 15, 16, 23, 25, 26, 28])
with torch.no_grad():
mean = np.load('../Analysis_Data/Mean_trajectories_job_id%d_epoch%d_DJIN.npy'%(args.job_id,args.epoch))
linear = np.load('../Comparison_models/Results/Longitudinal_predictions_baseline_id21_rfmice_test.npy')
# transform models
mean[:,:,1:] = psi.untransform(mean[:,:,1:])
linear[:,:,1:] = psi.untransform(linear[:,:,1:])
pop_avg_ = psi.untransform(pop_avg_.numpy())
mean_impute = np.zeros(mean.shape)
for yi, years in enumerate([0, 2, 4, 6, 8, 10, 12, 14, 16, 18]):
start = 0
for data in test_generator:
break
y = data['Y']
times = data['times']
mask = data['mask']
sample_weight = data['weights'].numpy()
sex_index = data['env'][:,12].long().numpy()
# transform data
y = psi.untransform(y.numpy())
y = mask*y + (1-mask)*(-1000)
record_times = []
record_y = []
record_mask = []
for b in range(num_test):
observed = torch.sum(mask[b,:, :], dim = -1) > 0
record_times.append(times[b, observed].numpy().astype(int))
record_y.append(y[b, observed, :].numpy())
record_mask.append(mask[b, observed, :].numpy().astype(int))
if yi == 0:
continue
else:
for b in range(num_test):
t = 0
for t_rec in range(len(record_times[b])):
t_index = np.digitize(record_times[b][t_rec], pop_avg_bins, right=True)
pop_data_t = pop_avg_[sex_index[b], t_index]
while t < min(40, int(np.sum(~np.isnan(mean[b,:,1])))):
if record_times[b][t_rec] == mean[b, t, 0].astype(int):
for n in range(N):
if record_mask[b][t_rec, n] > 0 and int(record_times[b][t_rec] - record_times[b][0]) < years + 1 and int(record_times[b][t_rec] - record_times[b][0]) >= years -1:
# missing
if record_mask[b][0, n] < 1:
missing[n][yi].append(mean[b, t, n+1])
exact_missing[n][yi].append(record_y[b][t_rec, n])
weights_missing[n][yi].append(sample_weight[b])
pop_missing[n][yi].append(pop_data_t[n])
else:
notmissing[n][yi].append(mean[b, t, n+1])
exact_notmissing[n][yi].append(record_y[b][t_rec, n])
weights_notmissing[n][yi].append(sample_weight[b])
first_notmissing[n][yi].append(record_y[b][0, n])
pop_notmissing[n][yi].append(pop_data_t[n])
linear_notmissing[n][yi].append(linear[b, t, n+1])
break
t += 1
R2_missing = np.zeros((10, N))
# -*- coding: utf-8 -*-
#==========================================
# Title: additive_gp.py
# Author: <NAME> and <NAME>
# Date: 20 August 2019
# Link: https://arxiv.org/abs/1906.08878
#==========================================
from typing import Union, Tuple
import GPy
import numpy as np
from paramz.transformations import Logexp
from ..models import GP
class GPWithSomeFixedDimsAtStart(GP):
"""
Utility class that allows for predict() interface while only providing
a subset of the inputs and filling in the missing ones.
If the fixed dims are h and the provided values are x,
then the predict() function returns the posterior at z = [h, x]
"""
def __init__(self, *args, fixed_dim_vals=None, **kwargs):
super().__init__(*args, **kwargs)
assert fixed_dim_vals is not None
self.fixed_dim_vals = np.array(fixed_dim_vals).flatten()
def add_fixed_to_x(self, x_star):
h_star = np.vstack([self.fixed_dim_vals] * len(x_star))
z_star = np.hstack((h_star, x_star))
return z_star
def predict_latent(self, x_star: np.ndarray, full_cov: bool = False,
kern=None):
"""
Predict at z = [h, x]
"""
return super().predict_latent(self.add_fixed_to_x(x_star),
full_cov, kern)
def dposterior_dx(self, x_star: np.ndarray) \
-> Tuple[np.ndarray, np.ndarray]:
return super().dposterior_dx(self.add_fixed_to_x(x_star))
class MixtureViaSumAndProduct(GPy.kern.Kern):
"""
Kernel of the form
k = (1-mix)*(k1 + k2) + mix*k1*k2
Parameters
----------
input_dim
number of all dims (for k1 and k2 together)
k1
First kernel
k2
Second kernel
active_dims
active dims of this kernel
mix
see equation above
    fix_inner_variances
        unlinks the inner kernels' variance parameters if set to True
fix_mix
Does not register mix as a parameter that can be learned
"""
def __init__(self, input_dim: int, k1: GPy.kern.Kern, k2: GPy.kern.Kern,
active_dims: Union[list, np.ndarray] = None, variance=1.0,
mix: float = 0.5,
fix_inner_variances: bool = False, fix_mix=True,
fix_variance=True):
super().__init__(input_dim, active_dims, 'MixtureViaSumAndProduct')
self.acceptable_kernels = (GPy.kern.RBF, GPy.kern.Matern52,
CategoryOverlapKernel
)
assert isinstance(k1, self.acceptable_kernels)
assert isinstance(k2, self.acceptable_kernels)
self.mix = GPy.core.parameterization.Param('mix', mix, Logexp())
self.variance = GPy.core.parameterization.Param('variance', variance,
Logexp())
self.fix_variance = fix_variance
if not self.fix_variance:
self.link_parameter(self.variance)
# If we are learning the mix, then add it as a visible param
self.fix_mix = fix_mix
if not self.fix_mix:
self.link_parameter(self.mix)
self.k1 = k1
self.k2 = k2
self.fix_inner_variances = fix_inner_variances
if self.fix_inner_variances:
self.k1.unlink_parameter(self.k1.variance)
self.k2.unlink_parameter(self.k2.variance)
self.link_parameters(self.k1, self.k2)
def get_dk_dtheta(self, k: GPy.kern.Kern, X, X2=None):
assert isinstance(k, self.acceptable_kernels)
if X2 is None:
X2 = X
X_sliced, X2_sliced = X[:, k.active_dims], X2[:, k.active_dims]
if isinstance(k, (GPy.kern.RBF, GPy.kern.Matern52)):
dk_dr = k.dK_dr_via_X(X_sliced, X2_sliced)
# dr/dl
if k.ARD:
tmp = k._inv_dist(X_sliced, X2_sliced)
dr_dl = -np.dstack([tmp * np.square(
X_sliced[:, q:q + 1] - X2_sliced[:, q:q + 1].T) /
k.lengthscale[q] ** 3
for q in range(k.input_dim)])
dk_dl = dk_dr[..., None] * dr_dl
else:
r = k._scaled_dist(X_sliced, X2_sliced)
dr_dl = - r / k.lengthscale
dk_dl = dk_dr * dr_dl
# # For testing the broadcast multiplication
# dk_dl_slow = []
# for ii in range(dr_dl.shape[-1]):
# dr_dlj = dr_dl[...,ii]
# dk_dlj = dk_dr * dr_dlj
# dk_dl_slow.append(dk_dlj)
#
# dk_dl_slow = np.dstack(dk_dl_slow)
elif isinstance(k, CategoryOverlapKernel):
dk_dl = None
else:
raise NotImplementedError
# Return variance grad as well, if not fixed
if not self.fix_inner_variances:
return k.K(X, X2) / k.variance, dk_dl
else:
return dk_dl
def update_gradients_full(self, dL_dK, X, X2=None):
# This gets the values of dk/dtheta as a NxN matrix (no summations)
if X2 is None:
X2 = X
dk1_dtheta1 = self.get_dk_dtheta(self.k1, X, X2) # N x N
dk2_dtheta2 = self.get_dk_dtheta(self.k2, X, X2) # N x N
# Separate the variance and lengthscale grads (for ARD purposes)
if self.fix_inner_variances:
dk1_dl1 = dk1_dtheta1
dk2_dl2 = dk2_dtheta2
dk1_dvar1 = []
dk2_dvar2 = []
else:
dk1_dvar1, dk1_dl1 = dk1_dtheta1
dk2_dvar2, dk2_dl2 = dk2_dtheta2
# Evaluate each kernel over its own subspace
k1_xx = self.k1.K(X, X2) # N x N
k2_xx = self.k2.K(X, X2) # N x N
# dk/dl for l1 and l2
# If gradient is None, then vars other than lengthscale don't exist.
# This is relevant for the CategoryOverlapKernel
if dk1_dl1 is not None:
# ARD requires a summation along last axis for each lengthscale
if hasattr(self.k1, 'ARD') and self.k1.ARD:
dk_dl1 = np.sum(
dL_dK[..., None] * (
0.5 * dk1_dl1 * (1 - self.mix) * self.variance
+ self.mix * self.variance * dk1_dl1 *
k2_xx[..., None]),
(0, 1))
else:
dk_dl1 = np.sum(
dL_dK * (0.5 * dk1_dl1 * (1 - self.mix) * self.variance
+ self.mix * self.variance * dk1_dl1 * k2_xx))
else:
dk_dl1 = []
if dk2_dl2 is not None:
if hasattr(self.k2, 'ARD') and self.k2.ARD:
dk_dl2 = np.sum(
dL_dK[..., None] * (
0.5 * dk2_dl2 * (1 - self.mix) * self.variance
+ self.mix * self.variance * dk2_dl2 *
k1_xx[..., None]),
(0, 1))
else:
dk_dl2 = np.sum(
dL_dK * (0.5 * dk2_dl2 * (1 - self.mix) * self.variance
+ self.mix * self.variance * dk2_dl2 * k1_xx))
else:
dk_dl2 = []
# dk/dvar for var1 and var 2
if self.fix_inner_variances:
dk_dvar1 = []
dk_dvar2 = []
else:
dk_dvar1 = np.sum(
dL_dK * (0.5 * dk1_dvar1 * (1 - self.mix) * self.variance
+ self.mix * self.variance * dk1_dvar1 * k2_xx))
dk_dvar2 = np.sum(
dL_dK * (0.5 * dk2_dvar2 * (1 - self.mix) * self.variance
+ self.mix * self.variance * dk2_dvar2 * k1_xx))
# Combining the gradients into one vector and updating
        dk_dtheta1 = np.hstack((dk_dvar1, dk_dl1))
# -*- coding: utf-8 -*-
# Run this app with `python3 sens_matrix_dashboard.py` and
# view the plots at http://127.0.0.1:8050/ in your web browser.
# (To open a web browser on a larson-group computer,
# login to malan with `ssh -X` and then type `firefox &`.)
def main():
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
import numpy as np
import pdb
import sklearn
from plotly.figure_factory import create_quiver
from itertools import chain
from analyze_sensitivity_matrix import \
analyzeSensMatrix, setupObsCol, setupDefaultMetricValsCol, \
findOutliers, findParamsUsingElastic
from test_analyzeSensMatrix import write_test_netcdf_files
# Metrics are observed quantities that we want a tuned simulation to match.
# The order of metricNames determines the order of rows in sensMatrix.
# Column vector of (positive) weights. A small value de-emphasizes
# the corresponding metric in the fit.
metricsNamesAndWeights = [ \
['SWCF_GLB', 4.01], \
['SWCF_DYCOMS', 1.01], \
['SWCF_HAWAII', 1.01], \
['SWCF_VOCAL', 1.01], \
['SWCF_LBA', 1.01], \
['SWCF_WP', 1.01], \
['SWCF_EP', 1.01], \
['SWCF_NP', 1.01], \
['SWCF_SP', 1.01], \
## ['SWCF_PA', 1.01], \
['SWCF_CAF', 1.01], \
['LWCF_GLB', 4.01], \
# ['LWCF_DYCOMS', 1.01], \
# ['LWCF_HAWAII', 1.01], \
# ['LWCF_VOCAL', 1.01], \
['LWCF_LBA', 1.01], \
['LWCF_WP', 1.01], \
# ['LWCF_EP', 1.01], \
# ['LWCF_NP', 1.01], \
# ['LWCF_SP', 1.01], \
## ['LWCF_PA', 1.01], \
# ['LWCF_CAF', 1.01], \
['PRECT_GLB', 4.01], \
['PRECT_LBA', 1.01], \
['PRECT_WP', 1.01], \
# ['PRECT_EP', 1.01], \
# ['PRECT_NP', 1.01], \
# ['PRECT_SP', 1.01], \
## ['PRECT_PA', 1.01], \
['PRECT_CAF', 1.01] \
]
# ['PRECT_DYCOMS', 0.01], \
# ['PRECT_HAWAII', 0.01], \
# ['PRECT_VOCAL', 0.01], \
dfMetricsNamesAndWeights = \
pd.DataFrame( metricsNamesAndWeights, columns = ['metricsNames', 'metricsWeights'] )
metricsNames = dfMetricsNamesAndWeights[['metricsNames']].to_numpy().astype(str)[:,0]
metricsWeights = dfMetricsNamesAndWeights[['metricsWeights']].to_numpy()
# Parameters are tunable model parameters, e.g. clubb_C8.
# The float listed below is a factor that is used below for scaling plots.
# Each parameter is associated with two sensitivity simulations in which that parameter is perturbed
# either up or down.
# The output from each sensitivity simulation is expected to be stored in its own netcdf file.
# Each netcdf file contains metric values and parameter values for a single simulation.
paramsNamesScalesAndFilenames = [ \
## ['clubb_c7', 1.0, \
## '20220516/sens.tau_2_Regional.nc', \
## '20220516/sens.tau_3_Regional.nc'], \
['clubb_c11', 1.0, \
'20220516/sens.tau_4_Regional.nc', \
'20220516/sens.tau_5_Regional.nc'], \
['clubb_gamma_coef', 1.0, \
'20220516/sens.tau_6_Regional.nc', \
'20220516/sens.tau_7_Regional.nc'], \
## ['clubb_c8', 1.0, \
## '20220516/sens.tau_9_Regional.nc', \
## '20220516/sens.tau_8_Regional.nc'], \
['clubb_c_k10', 1.0, \
'20220516/sens.tau_10_Regional.nc', \
'20220516/sens.tau_11_Regional.nc'], \
['clubb_c_invrs_tau_n2', 1.0, \
'20220516/sens.tau_12_Regional.nc',
'20220516/sens.tau_13_Regional.nc'], \
## ['clubb_c_invrs_tau_wpxp_n2_thresh', 1.e3, \
## '20220516/sens.tau_14_Regional.nc', \
## '20220516/sens.tau_15_Regional.nc'], \
## ['micro_vqit', 1.0, \
## '20220516/sens.tau_16_Regional.nc', \
## '20220516/sens.tau_17_Regional.nc'], \
]
dfparamsNamesScalesAndFilenames = \
pd.DataFrame( paramsNamesScalesAndFilenames, \
columns = ['paramsNames', 'paramsScales',
'sensNcFilenamesExt', 'sensNcFilenames'] )
paramsNames = dfparamsNamesScalesAndFilenames[['paramsNames']].to_numpy().astype(str)[:,0]
# Extract scaling factors of parameter values from user-defined list paramsNamesScalesAndFilenames.
# The scaling is not used for any calculations, but it allows us to avoid plotting very large or small values.
paramsScales = dfparamsNamesScalesAndFilenames[['paramsScales']].to_numpy().astype(float)[:,0]
sensNcFilenames = dfparamsNamesScalesAndFilenames[['sensNcFilenames']].to_numpy().astype(str)[:,0]
sensNcFilenamesExt = dfparamsNamesScalesAndFilenames[['sensNcFilenamesExt']].to_numpy().astype(str)[:,0]
# This the subset of paramsNames that vary from [0,1] (e.g., C5)
# and hence will be transformed to [0,infinity] in order to make
# the relationship between parameters and metrics more linear:
#transformedParamsNames = np.array(['clubb_c8','clubb_c_invrs_tau_n2', 'clubb_c_invrs_tau_n2_clear_wp3'])
transformedParamsNames = np.array([''])
# Netcdf file containing metric and parameter values from the default simulation
defaultNcFilename = \
'20220516/sens.tau_1_Regional.nc'
# Metrics from simulation that use the SVD-recommended parameter values
# Here, we use default simulation just as a placeholder.
linSolnNcFilename = \
'20220516/sens.tau_1_Regional.nc'
# Observed values of our metrics, from, e.g., CERES-EBAF.
# These observed metrics will be matched as closely as possible by analyzeSensMatrix.
# NOTE: PRECT is in the unit of m/s
obsMetricValsDict = { \
'LWCF_GLB': 28.008, 'PRECT_GLB': 0.000000031134259, 'SWCF_GLB': -45.81, 'TMQ_GLB': 24.423, \
'LWCF_DYCOMS': 19.36681938, 'PRECT_DYCOMS':0.000000007141516, 'SWCF_DYCOMS': -63.49394226, 'TMQ_DYCOMS':20.33586884,\
'LWCF_LBA': 43.83245087, 'PRECT_LBA':0.000000063727875, 'SWCF_LBA': -55.10041809, 'TMQ_LBA': 44.27890396,\
'LWCF_HAWAII': 24.78801537, 'PRECT_HAWAII':0.000000020676041, 'SWCF_HAWAII': -36.49626541, 'TMQ_HAWAII': 33.17501068,\
'LWCF_WP': 54.73321152, 'PRECT_WP':0.000000078688704, 'SWCF_WP': -62.09819031, 'TMQ_WP':51.13026810,\
'LWCF_EP': 33.42149734, 'PRECT_EP': 0.000000055586694, 'SWCF_EP': -51.79394531, 'TMQ_EP':44.34251404,\
'LWCF_NP': 26.23941231, 'PRECT_NP':0.000000028597503, 'SWCF_NP': -50.92364502, 'TMQ_NP':12.72111988,\
'LWCF_SP': 31.96141052, 'PRECT_SP':0.000000034625369, 'SWCF_SP': -70.26461792, 'TMQ_SP':10.95032024,\
'LWCF_PA': 47.32126999, 'PRECT_PA':0.000000075492694, 'SWCF_PA': -78.27433014, 'TMQ_PA':47.25967789,\
'LWCF_CAF': 43.99757003784179687500, 'PRECT_CAF':0.000000042313699, 'SWCF_CAF': -52.50243378, 'TMQ_CAF':36.79592514,\
'LWCF_VOCAL': 43.99757004, 'PRECT_VOCAL':0.000000001785546, 'SWCF_VOCAL': -77.26232147, 'TMQ_VOCAL':17.59922791 }
# Estimate non-linearity of the global model to perturbations in parameter values.
# To do so, calculate radius of curvature of the three points from the default simulation
# and the two sensitivity simulations.
#calcNormlzdRadiusCurv(metricsNames, paramsNames, transformedParamsNames,
# metricsWeights,
# sensNcFilenames, sensNcFilenamesExt, defaultNcFilename)
# Set up a column vector of observed metrics
obsMetricValsCol = setupObsCol(obsMetricValsDict, metricsNames)
# Calculate changes in parameter values needed to match metrics.
defaultMetricValsCol, defaultBiasesCol, \
defaultBiasesApprox, defaultBiasesApproxLowVals, defaultBiasesApproxHiVals, \
defaultBiasesApproxPC, defaultBiasesApproxLowValsPC, defaultBiasesApproxHiValsPC, \
normlzdWeightedDefaultBiasesApprox, normlzdWeightedDefaultBiasesApproxPC, \
defaultBiasesOrigApprox, defaultBiasesOrigApproxPC, \
sensMatrixOrig, sensMatrix, normlzdSensMatrix, \
normlzdWeightedSensMatrix, biasNormlzdSensMatrix, svdInvrsNormlzdWeighted, \
vhNormlzd, uNormlzd, sNormlzd, \
vhNormlzdWeighted, uNormlzdWeighted, sNormlzdWeighted, \
magParamValsRow, \
defaultParamValsOrigRow, dparamsSoln, dnormlzdParamsSoln, \
dparamsSolnPC, dnormlzdParamsSolnPC, \
paramsSoln, paramsLowVals, paramsHiVals, \
paramsSolnPC, paramsLowValsPC, paramsHiValsPC = \
analyzeSensMatrix(metricsNames, paramsNames, transformedParamsNames,
metricsWeights,
sensNcFilenames, defaultNcFilename,
obsMetricValsDict)
paramsLowValsPCBound, paramsHiValsPCBound = \
calcParamsBounds(metricsNames, paramsNames, transformedParamsNames,
metricsWeights, obsMetricValsCol,
magParamValsRow,
sensNcFilenames, sensNcFilenamesExt, defaultNcFilename)
# Create scatterplot to look at outliers
#createPcaBiplot(normlzdSensMatrix, defaultBiasesCol, obsMetricValsCol, metricsNames, paramsNames)
# Find outliers by use of the ransac algorithm
outlier_mask, defaultBiasesApproxRansac, normlzdWeightedDefaultBiasesApproxRansac, \
dnormlzdParamsSolnRansac, paramsSolnRansac = \
findOutliers(normlzdSensMatrix, normlzdWeightedSensMatrix, \
defaultBiasesCol, obsMetricValsCol, magParamValsRow, defaultParamValsOrigRow)
print( "ransac_outliers = ", metricsNames[outlier_mask] )
print( "ransac_inliers = ", metricsNames[~outlier_mask] )
#pdb.set_trace()
# Find best-fit params by use of the Elastic Net algorithm
defaultBiasesApproxElastic, normlzdWeightedDefaultBiasesApproxElastic, \
dnormlzdParamsSolnElastic, paramsSolnElastic = \
findParamsUsingElastic(normlzdSensMatrix, normlzdWeightedSensMatrix, \
defaultBiasesCol, obsMetricValsCol, metricsWeights, magParamValsRow, defaultParamValsOrigRow)
defaultBiasesApproxElasticCheck = ( normlzdWeightedSensMatrix @ dnormlzdParamsSolnElastic ) \
* np.reciprocal(metricsWeights) * np.abs(obsMetricValsCol)
print("defaultBiasesApproxElastic = ", defaultBiasesApproxElastic)
print("defaultBiasesApproxElasticCheck = ", defaultBiasesApproxElasticCheck)
#pdb.set_trace()
# Set up a column vector of metric values from the default simulation
defaultMetricValsCol = setupDefaultMetricValsCol(metricsNames, defaultNcFilename)
# Set up a column vector of metric values from the global simulation based on optimized
# parameter values.
linSolnMetricValsCol = setupDefaultMetricValsCol(metricsNames, linSolnNcFilename)
# Store biases in default simulation, ( global_model - default )
linSolnBiasesCol = np.subtract(linSolnMetricValsCol, defaultMetricValsCol)
# Calculate the fraction of the default-sim bias that remains after tuning.
# This is unweighted and hence is not necessarily less than one.
# defaultBiasesApprox = J*delta_p = ( fwd - def )
# numerator = ( fwd - def ) + ( def - obs ) = ( fwd - obs )
Bias = ( defaultBiasesApprox + defaultBiasesCol )
# defaultBiasesCol = delta_b = ( default - obs ) = denominator
BiasMagRatio = np.linalg.norm(Bias/np.abs(obsMetricValsCol))**2 / \
np.linalg.norm(defaultBiasesCol/np.abs(obsMetricValsCol))**2
# Calculate the fraction of the default-sim bias that remains after tuning,
# but using a truncated PC observation.
# This is unweighted and hence is not necessarily less than one.
# defaultBiasesApproxPC = J*delta_p = ( fwd - def )
# numerator = ( fwd - def ) + ( def - obs ) = ( fwd - obs )
BiasPC = ( defaultBiasesApproxPC + defaultBiasesCol )
# defaultBiasesCol = delta_b = ( default - obs ) = denominator
BiasPCMagRatio = np.linalg.norm(BiasPC/np.abs(obsMetricValsCol))**2 / \
np.linalg.norm(defaultBiasesCol/np.abs(obsMetricValsCol))**2
    # Calculate the fraction of the default-sim bias that remains after tuning,
    # but using the RANSAC solution.
    # This is unweighted and hence is not necessarily less than one.
# defaultBiasesApproxRansac = J*delta_p = ( fwd - def )
# numerator = ( fwd - def ) + ( def - obs ) = ( fwd - obs )
BiasRansac = ( defaultBiasesApproxRansac + defaultBiasesCol )
# defaultBiasesCol = delta_b = ( default - obs ) = denominator
BiasRansacMagRatio = np.linalg.norm(BiasRansac/np.abs(obsMetricValsCol))**2 / \
np.linalg.norm(defaultBiasesCol/np.abs(obsMetricValsCol))**2
    # Calculate the fraction of the default-sim bias that remains after tuning,
    # but using the Elastic Net solution.
    # This is unweighted and hence is not necessarily less than one.
# defaultBiasesApproxElastic = J*delta_p = ( fwd - def )
# numerator = ( fwd - def ) + ( def - obs ) = ( fwd - obs )
BiasElastic = ( defaultBiasesApproxElastic + defaultBiasesCol )
# defaultBiasesCol = delta_b = ( default - obs ) = denominator
BiasElasticMagRatio = np.linalg.norm(BiasElastic/np.abs(obsMetricValsCol))**2 / \
np.linalg.norm(defaultBiasesCol/np.abs(obsMetricValsCol))**2
# Calculate the global-model bias relative to the default-sim bias.
# This is unweighted and hence is not necessarily less than one.
# defaultBiasesApprox = J*delta_p = ( fwd - def )
# numerator = ( linSoln - def ) + ( def - obs ) = ( linSoln - obs )
linSolnBias = ( linSolnBiasesCol + defaultBiasesCol )
# defaultBiasesCol = delta_b = ( default - obs ) = denominator
linSolnBiasMagRatio = np.linalg.norm(linSolnBias/np.abs(obsMetricValsCol))**2 / \
np.linalg.norm(defaultBiasesCol/np.abs(obsMetricValsCol))**2
# Calculate the fraction of bias removed by the non-PC soln, but normalized and weighted,
# like the equations that the SVD actually solves, so that according to theory,
# the value should be < 1.
# But I'm not sure if it will be < 1 if the parameters are transformed to log space.
    normlzdMDeltaB = metricsWeights * defaultBiasesCol / np.abs(obsMetricValsCol)
"""Test functions.
This module implements several known mathematical functions, that can
be used to test RBFOpt.
Licensed under Revised BSD license, see LICENSE.
(C) Copyright Singapore University of Technology and Design 2014.
(C) Copyright International Business Machines Corporation 2017.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import sys
import math
import numpy as np
from rbfopt.rbfopt_black_box import RbfoptBlackBox
class branin:
"""
Branin function of the Dixon-Szego test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==2)
value = ((x[1] - (5.1/(4*math.pi*math.pi))*x[0]*x[0] +
5/math.pi*x[0] - 6)**2 + 10*(1-1/(8*math.pi)) *
math.cos(x[0]) +10)
return(value)
dimension = 2
var_lower = np.array([-5, 0])
var_upper = np.array([10, 15])
optimum_point = np.array([9.42477796, 2.47499998])
additional_optima = np.array([ [-3.14159265, 12.27500000],
[3.14159265, 2.27500000] ])
optimum_value = 0.397887357729739
var_type = np.array(['R'] * 2)
# -- end class
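# Quick self-check using only the data stored in the class above:
#   branin.evaluate(branin.optimum_point)  ->  ~0.397887  (== branin.optimum_value)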
class hartman3:
"""
Hartman3 function of the Dixon-Szego test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==3)
value = -math.fsum([ cls.c[i] *
np.exp(-math.fsum([cls.a[j][i]*
(x[j] - cls.p[j][i])**2
for j in range(3)]))
for i in range(4) ])
return(value)
a = [ [3.0, 0.1, 3.0, 0.1],
[10.0, 10.0, 10.0, 10.0],
[30.0, 35.0, 30.0, 35.0] ]
p = [ [0.36890, 0.46990, 0.10910, 0.03815],
[0.11700, 0.43870, 0.87320, 0.57430],
[0.26730, 0.74700, 0.55470, 0.88280] ]
c = [1.0, 1.2, 3.0, 3.2]
dimension = 3
var_lower = np.array([0, 0, 0])
var_upper = np.array([1, 1, 1])
optimum_point = np.array([0.1, 0.55592003, 0.85218259])
optimum_value = -3.8626347486217725
var_type = np.array(['R'] * 3)
# -- end class
class hartman6:
"""
Hartman6 function of the Dixon-Szego test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==6)
value = -math.fsum([ cls.c[i] *
np.exp(-math.fsum([cls.a[j][i]*
(x[j] - cls.p[j][i])**2
for j in range(6)]))
for i in range(4) ])
return(value)
a = [ [10.00, 0.05, 3.00, 17.00],
[3.00, 10.00, 3.50, 8.00],
[17.00, 17.00, 1.70, 0.05],
[3.50, 0.10, 10.00, 10.00],
[1.70, 8.00, 17.00, 0.10],
[8.00, 14.00, 8.00, 14.00] ]
p = [ [0.1312, 0.2329, 0.2348, 0.4047],
[0.1696, 0.4135, 0.1451, 0.8828],
[0.5569, 0.8307, 0.3522, 0.8732],
[0.0124, 0.3736, 0.2883, 0.5743],
[0.8283, 0.1004, 0.3047, 0.1091],
[0.5886, 0.9991, 0.6650, 0.0381] ]
c = [1.0, 1.2, 3.0, 3.2]
dimension = 6
var_lower = np.array([0, 0, 0, 0, 0, 0])
var_upper = np.array([1, 1, 1, 1, 1, 1])
optimum_point = np.array([0.20168952, 0.15001069, 0.47687398,
0.27533243, 0.31165162, 0.65730054])
optimum_value = -3.32236801141551
var_type = np.array(['R'] * 6)
# -- end class
class camel:
"""
Six-hump Camel function of the Dixon-Szego test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==2)
value = ((4 - 2.1*x[0]**2 + x[0]**4/3)*x[0]**2 +
x[0]*x[1] + (-4 + 4*x[1]**2)*x[1]**2)
return(value)
dimension = 2
var_lower = np.array([-3, -2])
var_upper = np.array([3, 2])
optimum_point = np.array([0.08984201, -0.7126])
optimum_value = -1.0316284535
var_type = np.array(['R'] * 2)
# -- end class
class goldsteinprice:
"""
Goldstein & Price function of the Dixon-Szego test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==2)
value= ((1 + (x[0] + x[1] + 1)**2 *
(19 - 14*x[0] + 3*x[0]**2 - 14*x[1] + 6*x[0]*x[1] +
3*x[1]**2)) *
(30 + (2*x[0] - 3*x[1])**2 *
(18 - 32*x[0] + 12*x[0]**2 + 48*x[1] - 36*x[0]*x[1] +
27*x[1]**2)))
return(value)
dimension = 2
var_lower = np.array([-2, -2])
var_upper = np.array([2, 2])
optimum_point = np.array([0.0, -1.0])
optimum_value = 3
var_type = np.array(['R'] * 2)
# -- end class
class shekel5:
"""
Shekel5 function of the Dixon-Szego test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==4)
value = -math.fsum([ 1.0 /
(math.fsum([math.fsum([ (x[i] - cls.a[i][j])**2
for i in range(4) ]),
cls.c[j]])) for j in range(5) ])
return(value)
a = [ [4.0, 1.0, 8.0, 6.0, 3.0],
[4.0, 1.0, 8.0, 6.0, 7.0],
[4.0, 1.0, 8.0, 6.0, 3.0],
[4.0, 1.0, 8.0, 6.0, 7.0] ]
c = [0.1, 0.2, 0.2, 0.4, 0.4]
dimension = 4
var_lower = np.array([0, 0, 0, 0])
var_upper = np.array([10, 10, 10, 10])
optimum_point = np.array([4, 4, 4, 4])
optimum_value = -10.1531958509790
var_type = np.array(['R'] * 4)
# -- end class
class shekel7:
"""
Shekel7 function of the Dixon-Szego test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==4)
value = -math.fsum([ 1.0 /
(math.fsum([math.fsum([ (x[i] - cls.a[i][j])**2
for i in range(4) ]),
cls.c[j]])) for j in range(7) ])
return(value)
a = [ [4.0, 1.0, 8.0, 6.0, 3.0, 2.0, 5.0],
[4.0, 1.0, 8.0, 6.0, 7.0, 9.0, 5.0],
[4.0, 1.0, 8.0, 6.0, 3.0, 2.0, 3.0],
[4.0, 1.0, 8.0, 6.0, 7.0, 9.0, 3.0] ]
c = [0.1, 0.2, 0.2, 0.4, 0.4, 0.6, 0.3]
dimension = 4
var_lower = np.array([0, 0, 0, 0])
var_upper = np.array([10, 10, 10, 10])
optimum_point = np.array([4, 4, 4, 4])
optimum_value = -10.4028188369303
var_type = np.array(['R'] * 4)
# -- end class
class shekel10:
"""
Shekel10 function of the Dixon-Szego test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==4)
value = -math.fsum([ 1.0 /
(math.fsum([math.fsum([ (x[i] - cls.a[i][j])**2
for i in range(4) ]),
cls.c[j]])) for j in range(10) ])
return(value)
a = [ [4.0, 1.0, 8.0, 6.0, 3.0, 2.0, 5.0, 8.0, 6.0, 7.0],
[4.0, 1.0, 8.0, 6.0, 7.0, 9.0, 5.0, 1.0, 2.0, 3.6],
[4.0, 1.0, 8.0, 6.0, 3.0, 2.0, 3.0, 8.0, 6.0, 7.0],
[4.0, 1.0, 8.0, 6.0, 7.0, 9.0, 3.0, 1.0, 2.0, 3.6] ]
c = [0.1, 0.2, 0.2, 0.4, 0.4, 0.6, 0.3, 0.7, 0.5, 0.5]
dimension = 4
var_lower = np.array([0, 0, 0, 0])
var_upper = np.array([10, 10, 10, 10])
optimum_point = np.array([4, 4, 4, 4])
optimum_value = -10.53628372621960
var_type = np.array(['R'] * 4)
# -- end class
class ex4_1_1:
"""
ex4_1_1 function of the GlobalLib test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==1)
value = (x[0]**6 - (52.0/25.0)*x[0]**5 + (39.0/80.0)*x[0]**4 +
(71.0/10.0)*x[0]**3 - (79.0/20.0)*x[0]**2 - x[0] +
1.0/10.0)
return(value)
dimension = 1
var_lower = np.array([-2])
var_upper = np.array([11])
optimum_point = np.array([-1.19131])
optimum_value = -7.487312360731
var_type = np.array(['R'])
# -- end class
class ex4_1_2:
"""
ex4_1_2 function of the GlobalLib test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==1)
a = [-500, 2.5, 1.666666666, 1.25, 1.0, 0.8333333, 0.714285714,
0.625, 0.555555555, 1.0, -43.6363636, 0.41666666, 0.384615384,
0.357142857, 0.3333333, 0.3125, 0.294117647, 0.277777777,
0.263157894, 0.25, 0.238095238, 0.227272727, 0.217391304,
0.208333333, 0.2, 0.192307692, 0.185185185, 0.178571428,
0.344827586, 0.6666666, -15.48387097, 0.15625, 0.1515151,
0.14705882, 0.14285712, 0.138888888, 0.135135135, 0.131578947,
0.128205128, 0.125, 0.121951219, 0.119047619, 0.116279069,
0.113636363, 0.1111111, 0.108695652, 0.106382978, 0.208333333,
0.408163265, 0.8]
value = math.fsum([a[i]*x[0]**(i+1) for i in range(50)])
return(value)
dimension = 1
var_lower = np.array([1])
var_upper = np.array([2])
optimum_point = np.array([1.09106])
optimum_value = -663.4993631230575
var_type = np.array(['R'] * 1)
# -- end class
class ex8_1_1:
"""
ex8_1_1 function of the GlobalLib test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==2)
value = np.cos(x[0])*np.sin(x[1]) - x[0]/(x[1]**2+1)
return(value)
dimension = 2
var_lower = np.array([-1, -1])
var_upper = np.array([2, 1])
optimum_point = np.array([2.0, 0.105783])
optimum_value = -2.0218067833
var_type = np.array(['R'] * 2)
# -- end class
class ex8_1_4:
"""
ex8_1_4 function of the GlobalLib test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==2)
value = 12*x[0]**2-6.3*x[0]**4+x[0]**6-6*x[0]*x[1]+6*x[1]**2
return(value)
dimension = 2
var_lower = np.array([-2, -5])
var_upper = np.array([4, 2])
optimum_point = np.array([0.0, 0.0])
optimum_value = 0.0
var_type = np.array(['R'] * 2)
# -- end class
class least:
"""
least function of the GlobalLib test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==3)
value = ((127 + (-x[1]*np.exp(-5*x[2])) - x[0])**2 +
(151 + (-x[1]*np.exp(-3*x[2])) - x[0])**2 +
(379 + (-x[1]*np.exp(-x[2])) - x[0])**2 +
(421 + (-x[1]*np.exp(5*x[2])) - x[0])**2 +
(460 + (-x[1]*np.exp(3*x[2])) - x[0])**2 +
(426 + (-x[1]*np.exp(x[2])) - x[0])**2)
return(value)
dimension = 3
var_lower = np.array([0, -200, -5])
var_upper = np.array([600, 200, 5] )
optimum_point = np.array([516.651174172, -149.351893696, -0.206642767973])
optimum_value = 14085.139848928
var_type = np.array(['R'] * 3)
# -- end class
class rbrock:
"""
rbrock function of the GlobalLib test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==2)
value = (100*(x[1] - x[0]**2)**2 + (1 - x[0])**2)
return(value)
dimension = 2
var_lower = np.array([-10, -10])
var_upper = np.array([5, 10])
optimum_point = np.array([1.0, 1.0])
optimum_value = 0.0
var_type = np.array(['R'] * 2)
# -- end class
class perm_6:
"""
perm function of dimension 6 from <NAME>.
http://www.mat.univie.ac.at/~neum/glopt/my_problems.html
We use parameters (6, 60) here.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==6)
beta = 60
value = math.fsum([ (math.fsum([((i + 1)**k + beta) *
((x[i]/(i+1))**k - 1)
for i in range(6)]))**2
for k in range(6) ]) + 1000
return(value)
dimension = 6
var_lower = np.array([-6 for i in range(6)])
var_upper = np.array([6 for i in range(6)])
optimum_point = np.array([(i+1) for i in range(6)])
optimum_value = 1000.0
var_type = np.array(['R'] * 6)
# -- end class
class perm0_8:
"""
perm0 function of dimension 8 from <NAME>.
http://www.mat.univie.ac.at/~neum/glopt/my_problems.html
We use parameters (8, 100) here.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==8)
beta = 100
value = math.fsum([ (math.fsum([(i + 1 + beta) *
(x[i]**k - (1/(i+1))**k)
for i in range(8)]))**2
for k in range(8) ]) + 1000
return(value)
dimension = 8
var_lower = np.array([-1 for i in range(8)])
var_upper = np.array([1 for i in range(8)])
optimum_point = np.array([1.0/(i+1) for i in range(8)])
optimum_value = 1000.0
var_type = np.array(['R'] * 8)
# -- end class
class schoen_6_1:
"""
schoen function of dimension 6 with 50 stationary points.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==6)
numerator = 0.0
denominator = 0.0
dist = np.sum((x - cls.z)**2, axis=1)
for i in range(50):
prod = 1.0
for j in range(50):
if (i != j):
prod *= dist[j]
numerator += cls.f[i]*prod
denominator += prod
value = numerator/denominator
return(value)
z = np.array(
[[0.298854, 0.181010, 0.984817, 0.125272, 0.548396, 0.894658],
[0.800371, 0.817380, 0.398577, 0.652349, 0.250843, 0.130235],
[0.268631, 0.929778, 0.640422, 0.462004, 0.492930, 0.434955],
[0.257863, 0.729198, 0.210810, 0.364378, 0.228216, 0.947432],
[0.767627, 0.592150, 0.103788, 0.696895, 0.472449, 0.244504],
[0.369630, 0.110889, 0.072344, 0.515753, 0.068087, 0.103057],
[0.425457, 0.807081, 0.491209, 0.449497, 0.065690, 0.592775],
[0.544229, 0.619841, 0.704609, 0.573098, 0.044844, 0.305800],
[0.164031, 0.722884, 0.670496, 0.517915, 0.176386, 0.921565],
[0.153788, 0.703577, 0.899129, 0.406134, 0.941356, 0.538215],
[0.984781, 0.510479, 0.573361, 0.884599, 0.399472, 0.712935],
[0.488416, 0.403997, 0.888823, 0.048434, 0.265197, 0.478025],
[0.047985, 0.280071, 0.709960, 0.278919, 0.035737, 0.037699],
[0.656172, 0.498412, 0.458622, 0.982970, 0.041234, 0.921127],
[0.590802, 0.359690, 0.396516, 0.338153, 0.320793, 0.847369],
[0.649160, 0.846974, 0.451818, 0.064864, 0.818545, 0.955844],
[0.583716, 0.669610, 0.463098, 0.492710, 0.989690, 0.002397],
[0.097300, 0.112389, 0.128759, 0.182995, 0.262808, 0.701887],
[0.487363, 0.892520, 0.269056, 0.116046, 0.905416, 0.808013],
[0.908316, 0.023997, 0.670399, 0.985859, 0.178548, 0.450410],
[0.230409, 0.381732, 0.613667, 0.697260, 0.016950, 0.736507],
[0.132544, 0.526349, 0.650042, 0.084086, 0.979257, 0.771499],
[0.872978, 0.008826, 0.587481, 0.624637, 0.623175, 0.939539],
[0.447828, 0.836386, 0.223285, 0.422756, 0.344488, 0.555953],
[0.546839, 0.153934, 0.953017, 0.640891, 0.666774, 0.647583],
[0.762237, 0.608920, 0.401447, 0.056202, 0.203535, 0.890609],
[0.655150, 0.444544, 0.495582, 0.247926, 0.155128, 0.188004],
[0.481813, 0.387178, 0.597276, 0.634671, 0.285404, 0.714793],
[0.976385, 0.018854, 0.262585, 0.640434, 0.086314, 0.669879],
[0.120164, 0.882300, 0.057626, 0.695111, 0.735135, 0.004711],
[0.414644, 0.715618, 0.642033, 0.770645, 0.407019, 0.502945],
[0.257475, 0.620029, 0.840603, 0.638546, 0.636521, 0.883558],
[0.788980, 0.374926, 0.448016, 0.081941, 0.225763, 0.944905],
[0.661591, 0.178832, 0.790349, 0.141653, 0.424235, 0.571960],
[0.546361, 0.624907, 0.190470, 0.412713, 0.124748, 0.662788],
[0.226384, 0.065829, 0.960836, 0.767766, 0.089695, 0.441792],
[0.303675, 0.370047, 0.973692, 0.830432, 0.424719, 0.173571],
[0.548375, 0.823234, 0.334253, 0.078398, 0.097269, 0.195120],
[0.646225, 0.100478, 0.723833, 0.891035, 0.386094, 0.360272],
[0.362757, 0.114700, 0.731020, 0.783785, 0.250399, 0.244399],
[0.904335, 0.869074, 0.479004, 0.525872, 0.359411, 0.338333],
[0.563175, 0.245903, 0.694417, 0.833524, 0.205055, 0.132535],
[0.401356, 0.920963, 0.401902, 0.120625, 0.765834, 0.381552],
[0.769562, 0.279591, 0.567598, 0.017192, 0.697366, 0.813451],
[0.738572, 0.984740, 0.007616, 0.005382, 0.592976, 0.771773],
[0.683721, 0.824097, 0.731623, 0.936945, 0.182420, 0.393537],
[0.375859, 0.541929, 0.974640, 0.377459, 0.754060, 0.019335],
[0.410275, 0.619158, 0.148428, 0.419225, 0.637412, 0.204038],
[0.552701, 0.472723, 0.491747, 0.017922, 0.198525, 0.074668],
[0.749510, 0.158720, 0.395476, 0.528285, 0.143614, 0.961610]])
f = np.array(
[-1000, -1000, -1000, 672.2, 861.4, 520.9, 121.0, 11.5, 48.2,
702.4, 536.2, 457.7, 801.3, 787.7, 768.6, 292.4, 960.0, 573.1,
303.7, 283.3, 474.1, 216.9, 462.2, 853.6, 677.1, 464.6, 830.6,
831.8, 109.6, 967.6, 122.9, 896.2, 490.2, 710.4, 81.1, 802.9,
999.8, 945.5, 672.3, 712.9, 235.8, 266.5, 772.4, 326.6, 585.5,
16.9, 135.9, 224.2, 382.1, 614.6])
dimension = 6
var_lower = np.array([0 for i in range(6)])
var_upper = np.array([1 for i in range(6)])
optimum_point = np.array([0.298854, 0.181010, 0.984817,
0.125272, 0.548396, 0.894658])
optimum_value = -1000
var_type = np.array(['R'] * 6)
# -- end class
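# Note: by construction the rational interpolant passes through every (z[i], f[i])
# pair, so evaluating at the first stationary point recovers the global minimum:
#   schoen_6_1.evaluate(schoen_6_1.z[0])  ->  -1000.0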
class schoen_6_2:
"""
schoen function of dimension 6 with 50 stationary points.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==6)
numerator = 0.0
denominator = 0.0
dist = np.sum((x - cls.z)**2, axis=1)
for i in range(50):
prod = 1.0
for j in range(50):
if (i != j):
prod *= dist[j]
numerator += cls.f[i]*prod
denominator += prod
value = numerator/denominator
return(value)
z = np.array(
[[0.669711, 0.815540, 0.646120, 0.377447, 0.111538, 0.040529],
[0.000632, 0.706804, 0.857031, 0.473778, 0.993569, 0.616184],
[0.625617, 0.880221, 0.534547, 0.760235, 0.276998, 0.735438],
[0.774577, 0.922914, 0.947791, 0.315328, 0.414841, 0.785803],
[0.079768, 0.131498, 0.225123, 0.464621, 0.638041, 0.992795],
[0.471038, 0.244503, 0.565776, 0.898397, 0.604639, 0.306230],
[0.642233, 0.482219, 0.034943, 0.934805, 0.972714, 0.153664],
[0.550151, 0.310507, 0.042126, 0.230722, 0.444375, 0.117355],
[0.789984, 0.488482, 0.065237, 0.842940, 0.793454, 0.799489],
[0.850183, 0.754551, 0.516033, 0.166362, 0.201966, 0.044234],
[0.000601, 0.896758, 0.304433, 0.149125, 0.178398, 0.871836],
[0.056787, 0.932745, 0.218009, 0.778061, 0.131847, 0.356237],
[0.210266, 0.221479, 0.014831, 0.200901, 0.656693, 0.891819],
[0.528515, 0.178025, 0.188138, 0.411485, 0.217833, 0.907579],
[0.195801, 0.663099, 0.477312, 0.395250, 0.655791, 0.820570],
[0.933208, 0.789323, 0.350520, 0.855434, 0.491082, 0.874993],
[0.251047, 0.543513, 0.529644, 0.218495, 0.351637, 0.608904],
[0.963286, 0.793004, 0.650148, 0.881362, 0.904832, 0.005397],
[0.431744, 0.438965, 0.044544, 0.834968, 0.330614, 0.451282],
[0.234845, 0.328576, 0.388284, 0.339183, 0.206086, 0.600034],
[0.512783, 0.961787, 0.959109, 0.632098, 0.910614, 0.912025],
[0.454168, 0.743189, 0.834284, 0.955817, 0.072172, 0.523068],
[0.696968, 0.720236, 0.341060, 0.054580, 0.045599, 0.549192],
[0.272955, 0.318845, 0.700767, 0.426325, 0.895755, 0.843128],
[0.992189, 0.332899, 0.272784, 0.019284, 0.073711, 0.434800],
[0.154276, 0.639611, 0.924641, 0.587242, 0.358453, 0.548022],
[0.021506, 0.450392, 0.515150, 0.032232, 0.650223, 0.849384],
[0.316499, 0.513234, 0.958219, 0.843587, 0.125408, 0.836643],
[0.538587, 0.261750, 0.732136, 0.030271, 0.893345, 0.270532],
[0.987469, 0.708780, 0.446487, 0.968784, 0.734448, 0.788229],
[0.353358, 0.135036, 0.249018, 0.565029, 0.740519, 0.250807],
[0.810372, 0.656510, 0.472093, 0.225741, 0.420513, 0.202519],
[0.848128, 0.551586, 0.513140, 0.956164, 0.483389, 0.404478],
[0.292239, 0.297077, 0.934202, 0.468329, 0.872274, 0.992632],
[0.828869, 0.534749, 0.716451, 0.405855, 0.164485, 0.531068],
[0.130616, 0.757677, 0.284500, 0.438300, 0.957643, 0.725899],
[0.503542, 0.640368, 0.381914, 0.847206, 0.134660, 0.762294],
[0.653851, 0.646544, 0.436036, 0.944225, 0.310369, 0.392362],
[0.539397, 0.027168, 0.697972, 0.209293, 0.992890, 0.008113],
[0.902045, 0.171034, 0.194924, 0.620057, 0.002203, 0.557433],
[0.802612, 0.085835, 0.380626, 0.492568, 0.238166, 0.961837],
[0.466993, 0.647847, 0.113397, 0.015357, 0.928904, 0.166425],
[0.892021, 0.869756, 0.681364, 0.129555, 0.394682, 0.745036],
[0.060675, 0.869904, 0.757236, 0.220765, 0.615988, 0.754288],
[0.031815, 0.340961, 0.455958, 0.529616, 0.840036, 0.365200],
[0.834595, 0.603639, 0.745330, 0.085080, 0.184636, 0.238718],
[0.575681, 0.250761, 0.874497, 0.870401, 0.854591, 0.968971],
[0.359629, 0.724830, 0.455053, 0.120311, 0.258563, 0.932004],
[0.209891, 0.990298, 0.767661, 0.284193, 0.375076, 0.154363],
[0.410402, 0.437385, 0.639614, 0.946647, 0.579466, 0.524775]])
f = np.array(
[-1000, -1000, -1000, 109.6, 132.4, 558.2, 158.0, 6.2, 205.4,
593.9, 2.4, 399.8, 395.9, 212.6, 976.1, 104.4, 552.1, 436.3,
837.1, 283.7, 779.7, 392.1, 85.8, 885.1, 401.5, 367.5, 694.4,
691.6, 933.1, 590.7, 246.2, 370.0, 54.3, 719.4, 95.2, 276.0,
829.1, 613.6, 242.8, 424.6, 320.6, 666.1, 479.2, 420.0, 956.6,
241.0, 21.1, 169.8, 178.1, 394.4])
dimension = 6
var_lower = np.array([0 for i in range(6)])
var_upper = np.array([1 for i in range(6)])
optimum_point = np.array([0.669711, 0.815540, 0.646120,
0.377447, 0.111538, 0.040529])
optimum_value = -1000
var_type = np.array(['R'] * 6)
# -- end class
class schoen_10_1:
"""
schoen function of dimension 10 with 50 stationary points.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==10)
numerator = 0.0
denominator = 0.0
dist = np.sum((x - cls.z)**2, axis=1)
for i in range(50):
prod = 1.0
for j in range(50):
if (i != j):
prod *= dist[j]
numerator += cls.f[i]*prod
denominator += prod
value = numerator/denominator
return(value)
z = np.array(
[[0.914871, 0.765230, 0.139426, 0.617466, 0.823635,
0.794003, 0.801171, 0.568811, 0.279434, 0.540422],
[0.976983, 0.593277, 0.701115, 0.585262, 0.669106,
0.272906, 0.177127, 0.143389, 0.561181, 0.018744],
[0.385208, 0.984106, 0.390066, 0.905970, 0.169600,
0.191291, 0.564157, 0.689910, 0.857031, 0.715390],
[0.975998, 0.536904, 0.819333, 0.801793, 0.564454,
0.336124, 0.654190, 0.044197, 0.717416, 0.465807],
[0.750519, 0.415284, 0.258927, 0.736115, 0.597744,
0.763716, 0.747691, 0.969633, 0.188117, 0.964954],
[0.412888, 0.671756, 0.380214, 0.558595, 0.768370,
0.998320, 0.212183, 0.606757, 0.531315, 0.303569],
[0.196682, 0.139879, 0.108608, 0.736975, 0.755971,
0.021390, 0.852398, 0.188596, 0.920133, 0.045012],
[0.956270, 0.729258, 0.397664, 0.013146, 0.519861,
0.300011, 0.008396, 0.820346, 0.176841, 0.402298],
[0.126432, 0.872346, 0.923581, 0.297492, 0.992744,
0.486525, 0.915493, 0.589980, 0.498242, 0.989945],
[0.697409, 0.026641, 0.875467, 0.503039, 0.563285,
0.096769, 0.933643, 0.884419, 0.585825, 0.395465],
[0.494783, 0.824300, 0.153326, 0.202651, 0.579815,
0.416954, 0.707624, 0.497959, 0.568876, 0.812841],
[0.126963, 0.757337, 0.648583, 0.787445, 0.822586,
0.401155, 0.301350, 0.562707, 0.744074, 0.088372],
[0.293611, 0.835864, 0.925111, 0.760322, 0.729456,
0.096840, 0.651466, 0.975836, 0.691353, 0.038384],
[0.999250, 0.916829, 0.205699, 0.027241, 0.156956,
0.206598, 0.175242, 0.811219, 0.660192, 0.119865],
[0.387978, 0.665180, 0.774376, 0.135223, 0.766238,
0.380668, 0.058279, 0.727506, 0.991527, 0.345759],
[0.299341, 0.066231, 0.680305, 0.392230, 0.319985,
0.698292, 0.100236, 0.394973, 0.096232, 0.362943],
[0.281548, 0.860858, 0.647870, 0.981650, 0.110777,
0.836484, 0.697387, 0.659942, 0.694425, 0.434991],
[0.606706, 0.052287, 0.858208, 0.738885, 0.158495,
0.002367, 0.933796, 0.112986, 0.647308, 0.421573],
[0.776505, 0.101364, 0.610406, 0.275033, 0.548409,
0.998967, 0.536743, 0.943903, 0.960993, 0.251672],
[0.371347, 0.491122, 0.772374, 0.860206, 0.752131,
0.338591, 0.826739, 0.312111, 0.768881, 0.862719],
[0.866886, 0.358220, 0.131205, 0.276334, 0.334111,
0.429525, 0.752197, 0.167524, 0.437764, 0.162916],
[0.584246, 0.511215, 0.659647, 0.349220, 0.954428,
0.477982, 0.386041, 0.813944, 0.753530, 0.983276],
[0.697327, 0.499835, 0.530487, 0.599958, 0.497257,
0.998852, 0.106262, 0.186978, 0.887481, 0.749174],
[0.041611, 0.278918, 0.999095, 0.825221, 0.218320,
0.383711, 0.077041, 0.642061, 0.668906, 0.758298],
[0.072437, 0.592862, 0.040655, 0.446330, 0.651659,
0.055738, 0.631924, 0.890039, 0.192989, 0.741054],
[0.533886, 0.135079, 0.787647, 0.593408, 0.749228,
0.749045, 0.190386, 0.755508, 0.465321, 0.465156],
[0.748843, 0.696419, 0.882124, 0.843895, 0.858057,
0.220107, 0.350310, 0.102947, 0.453576, 0.875940],
[0.560231, 0.580247, 0.381834, 0.807535, 0.184636,
0.615702, 0.628408, 0.081783, 0.793384, 0.233639],
[0.384827, 0.589138, 0.630013, 0.634506, 0.630712,
0.521293, 0.494486, 0.681700, 0.288512, 0.319808],
[0.721978, 0.452289, 0.426726, 0.323106, 0.781584,
0.999325, 0.043670, 0.884560, 0.520936, 0.430684],
[0.810388, 0.624041, 0.811624, 0.105973, 0.199807,
0.440644, 0.864152, 0.282280, 0.397116, 0.499932],
[0.973889, 0.677797, 0.080137, 0.549098, 0.625445,
0.577342, 0.538642, 0.388039, 0.552273, 0.793807],
[0.365176, 0.228017, 0.623500, 0.084450, 0.177343,
0.910108, 0.632719, 0.521458, 0.894843, 0.707893],
[0.502069, 0.622312, 0.958019, 0.744999, 0.515695,
0.407885, 0.590739, 0.736542, 0.297555, 0.237955],
[0.313835, 0.090014, 0.336274, 0.433171, 0.330864,
0.105751, 0.160367, 0.651934, 0.207260, 0.293577],
[0.886072, 0.592935, 0.498116, 0.321835, 0.011216,
0.543911, 0.506579, 0.216779, 0.406812, 0.261349],
[0.789947, 0.881332, 0.696597, 0.742955, 0.252224,
0.718157, 0.188217, 0.371208, 0.178640, 0.347720],
[0.482759, 0.663618, 0.622706, 0.036170, 0.278854,
0.088147, 0.482808, 0.134824, 0.028828, 0.944537],
[0.184705, 0.662346, 0.917194, 0.186490, 0.918392,
0.955111, 0.636015, 0.447595, 0.813716, 0.372839],
[0.231741, 0.637199, 0.745257, 0.201568, 0.697485,
0.897022, 0.239791, 0.495219, 0.153831, 0.387172],
[0.198061, 0.194102, 0.550259, 0.751804, 0.503973,
0.034252, 0.788267, 0.731760, 0.118338, 0.057247],
[0.068470, 0.545180, 0.668845, 0.714932, 0.688014,
0.203845, 0.146138, 0.109039, 0.470214, 0.441797],
[0.085180, 0.142394, 0.938665, 0.071422, 0.946796,
0.697832, 0.472400, 0.161384, 0.325715, 0.122550],
[0.637672, 0.986961, 0.969438, 0.989508, 0.381318,
0.800871, 0.012035, 0.326007, 0.459124, 0.645374],
[0.147210, 0.954608, 0.361146, 0.094699, 0.092327,
0.301664, 0.478447, 0.008274, 0.680576, 0.004184],
[0.768792, 0.812618, 0.915766, 0.029070, 0.506944,
0.457816, 0.839167, 0.024706, 0.990756, 0.088779],
[0.872678, 0.601536, 0.948347, 0.621023, 0.415621,
0.289340, 0.291338, 0.190461, 0.664007, 0.583513],
[0.641216, 0.700152, 0.080576, 0.355500, 0.294700,
0.338614, 0.563964, 0.528079, 0.759223, 0.508432],
[0.738489, 0.077376, 0.429485, 0.300586, 0.576927,
0.185931, 0.231659, 0.954833, 0.614178, 0.092903],
[0.729321, 0.318607, 0.768657, 0.899419, 0.749499,
0.623403, 0.671793, 0.052835, 0.973726, 0.168336]])
f = np.array(
[-1000, -1000, -1000, 799.1, 396.8, 370.3, 400.2, 239.7,
678.8, 868.9, 564.4, 681.6, 153.0, 760.7, 562.9, 434.9,
579.2, 260.6, 88.5, 601.3, 754.8, 894.8, 672.8, 633.7, 921.8,
43.2, 286.2, 945.5, 716.0, 72.7, 631.2, 640.3, 425.1, 825.8,
555.8, 136.9, 805.7, 786.5, 400.0, 856.4, 548.0, 510.8, 52.3,
111.6, 686.6, 888.2, 315.4, 333.9, 61.5, 755.2])
dimension = 10
var_lower = np.array([0 for i in range(10)])
var_upper = np.array([1 for i in range(10)])
optimum_point = np.array([0.914871, 0.765230, 0.139426, 0.617466,
0.823635, 0.794003, 0.801171, 0.568811,
0.279434, 0.540422])
optimum_value = -1000
var_type = np.array(['R'] * 10)
# -- end class
class schoen_10_2:
"""
schoen function of dimension 10 with 50 stationary points.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==10)
numerator = 0.0
denominator = 0.0
dist = np.sum((x - cls.z)**2, axis=1)
for i in range(50):
prod = 1.0
for j in range(50):
if (i != j):
prod *= dist[j]
numerator += cls.f[i]*prod
denominator += prod
value = numerator/denominator
return(value)
z = np.array(
[[0.131461, 0.965235, 0.046134, 0.983011, 0.719813,
0.827542, 0.662422, 0.570546, 0.578707, 0.013264],
[0.068454, 0.682785, 0.582736, 0.434517, 0.310613,
0.869876, 0.993949, 0.629156, 0.590599, 0.356378],
[0.632837, 0.961665, 0.015079, 0.378878, 0.805608,
0.685239, 0.528658, 0.752934, 0.717790, 0.374865],
[0.286191, 0.912944, 0.400358, 0.902532, 0.324887,
0.850063, 0.483503, 0.764147, 0.147726, 0.159851],
[0.303483, 0.754790, 0.090527, 0.653764, 0.164323,
0.402931, 0.593477, 0.448444, 0.711483, 0.113869],
[0.057398, 0.302029, 0.596351, 0.565466, 0.694204,
0.974864, 0.323989, 0.298493, 0.859391, 0.238714],
[0.139267, 0.214902, 0.608462, 0.297987, 0.499810,
0.578553, 0.548077, 0.208442, 0.046162, 0.246848],
[0.680420, 0.783181, 0.828103, 0.475810, 0.680401,
0.188455, 0.015200, 0.650103, 0.762389, 0.063985],
[0.409243, 0.600740, 0.302354, 0.588411, 0.436291,
0.294790, 0.701477, 0.994162, 0.433749, 0.535320],
[0.077949, 0.530126, 0.869737, 0.387811, 0.705317,
0.632911, 0.442087, 0.082918, 0.441383, 0.591975],
[0.622628, 0.054964, 0.020475, 0.145616, 0.163873,
0.321546, 0.282867, 0.743494, 0.750568, 0.732386],
[0.538574, 0.066932, 0.225204, 0.290045, 0.613242,
0.529365, 0.384018, 0.946557, 0.974384, 0.425297],
[0.108817, 0.850094, 0.886417, 0.161581, 0.082973,
0.506354, 0.589650, 0.638991, 0.045151, 0.688464],
[0.917742, 0.365119, 0.484176, 0.173231, 0.210253,
0.303688, 0.992141, 0.023109, 0.977178, 0.535146],
[0.183469, 0.198085, 0.511596, 0.275610, 0.753700,
0.437328, 0.986237, 0.028654, 0.767921, 0.997910],
[0.484908, 0.759122, 0.577318, 0.359934, 0.935730,
0.617833, 0.770173, 0.311175, 0.004831, 0.157457],
[0.634077, 0.236972, 0.016427, 0.261753, 0.349712,
0.245870, 0.412238, 0.523557, 0.985327, 0.094060],
[0.477875, 0.803438, 0.496728, 0.848920, 0.497386,
0.938203, 0.279797, 0.287076, 0.395184, 0.980546],
[0.450215, 0.193712, 0.975838, 0.103925, 0.077410,
0.709573, 0.253072, 0.311723, 0.885664, 0.204528],
[0.557312, 0.815198, 0.097914, 0.539142, 0.826048,
0.130070, 0.049858, 0.223634, 0.076387, 0.831224],
[0.927559, 0.324916, 0.563393, 0.209281, 0.344394,
0.953384, 0.298679, 0.890637, 0.966615, 0.380006],
[0.026403, 0.997573, 0.479163, 0.379686, 0.687928,
0.832002, 0.214326, 0.348248, 0.073151, 0.062646],
[0.726869, 0.911171, 0.961920, 0.874884, 0.216867,
0.076966, 0.776240, 0.495777, 0.963492, 0.425246],
[0.357483, 0.486330, 0.759177, 0.748362, 0.889904,
0.350438, 0.232983, 0.823613, 0.792656, 0.441264],
[0.875826, 0.359459, 0.214808, 0.425850, 0.493328,
0.456048, 0.523145, 0.504154, 0.090128, 0.472437],
[0.813400, 0.808407, 0.427211, 0.902524, 0.210376,
0.490662, 0.915939, 0.169439, 0.078865, 0.485371],
[0.877334, 0.982207, 0.679085, 0.486335, 0.940715,
0.585964, 0.289279, 0.694886, 0.172625, 0.201457],
[0.141599, 0.476124, 0.762246, 0.067045, 0.411332,
0.813196, 0.134138, 0.302390, 0.856145, 0.349243],
[0.346912, 0.082142, 0.787442, 0.857465, 0.371129,
0.448550, 0.967943, 0.775340, 0.943681, 0.656127],
[0.619267, 0.547196, 0.470422, 0.141566, 0.584198,
0.952226, 0.196462, 0.629549, 0.685469, 0.824365],
[0.014209, 0.789812, 0.836373, 0.186139, 0.493840,
0.710697, 0.910033, 0.368287, 0.865953, 0.140892],
[0.482763, 0.072574, 0.026730, 0.143687, 0.739505,
0.419649, 0.013683, 0.662644, 0.785254, 0.234561],
[0.821421, 0.844100, 0.153937, 0.671762, 0.290469,
0.631347, 0.591435, 0.498966, 0.043395, 0.176771],
[0.404994, 0.496656, 0.951774, 0.497357, 0.715401,
0.023378, 0.493045, 0.342766, 0.117055, 0.698590],
[0.985857, 0.831692, 0.423498, 0.215757, 0.341260,
0.790760, 0.941186, 0.716883, 0.062641, 0.582012],
[0.676905, 0.280897, 0.800638, 0.898913, 0.735995,
0.592412, 0.433021, 0.432772, 0.874477, 0.112375],
[0.377382, 0.118941, 0.529204, 0.419434, 0.673891,
0.074904, 0.129868, 0.819585, 0.220536, 0.353223],
[0.233415, 0.136703, 0.487256, 0.777498, 0.901915,
0.612402, 0.778635, 0.436718, 0.484520, 0.641969],
[0.273297, 0.670196, 0.344525, 0.669751, 0.180230,
0.530085, 0.393284, 0.326043, 0.260840, 0.364690],
[0.931213, 0.676123, 0.912481, 0.898258, 0.001887,
0.408306, 0.917215, 0.496959, 0.287951, 0.562511],
[0.047196, 0.780338, 0.895994, 0.088169, 0.552425,
0.130790, 0.308504, 0.232476, 0.187952, 0.105936],
[0.343517, 0.356222, 0.416018, 0.450278, 0.487765,
0.040510, 0.592363, 0.771635, 0.577849, 0.315843],
[0.527759, 0.529503, 0.210423, 0.756794, 0.892670,
0.339374, 0.445837, 0.363265, 0.432114, 0.942045],
[0.560107, 0.110906, 0.115725, 0.761393, 0.969105,
0.921166, 0.455014, 0.593512, 0.111887, 0.217300],
[0.463382, 0.635591, 0.329484, 0.573602, 0.492558,
0.474174, 0.371906, 0.850465, 0.467637, 0.261373],
[0.033051, 0.422543, 0.294155, 0.699026, 0.846231,
0.047967, 0.686826, 0.480273, 0.463181, 0.345601],
[0.285473, 0.723925, 0.202386, 0.671909, 0.685277,
0.993969, 0.415329, 0.155218, 0.233826, 0.088752],
[0.029705, 0.651519, 0.813239, 0.677718, 0.961189,
0.285385, 0.824635, 0.837670, 0.524970, 0.815489],
[0.519627, 0.508274, 0.141067, 0.156163, 0.274566,
0.536322, 0.834749, 0.852042, 0.656166, 0.964211],
[0.119675, 0.971352, 0.052983, 0.178217, 0.408438,
0.215091, 0.102098, 0.256312, 0.051758, 0.906712]])
f = np.array(
[-1000, -1000, -1000, 90.4, 830.9, 52.7, 375.2, 289.7, 244.1,
470.2, 111.7, 968.9, 903.4, 918.5, 820.3, 441.2, 687.5, 836.9,
11.0, 454.5, 929.3, 952.6, 937.2, 870.5, 211.7, 378.4, 320.3,
729.6, 420.8, 213.8, 717.7, 285.4, 522.8, 748.3, 371.0, 501.2,
568.6, 111.9, 645.2, 486.2, 157.0, 968.5, 137.6, 127.2, 943.4,
437.2, 199.7, 415.4, 966.0, 362.3])
dimension = 10
var_lower = np.array([0 for i in range(10)])
var_upper = np.array([1 for i in range(10)])
optimum_point = np.array([0.131461, 0.965235, 0.046134, 0.983011,
0.719813, 0.827542, 0.662422, 0.570546,
0.578707, 0.013264])
optimum_value = -1000
var_type = np.array(['R'] * 10)
# -- end class
class schaeffer_f7_12_1:
"""
Schaeffer F7 function.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==12)
value = 0
normalizer = 1.0/float(len(x)-1)
for i in range(len(x)-1):
si = 2**i*np.sqrt((x[i]-cls.optimum_point[i])**2 +
(x[i+1]-cls.optimum_point[i+1])**2)
value += (normalizer * np.sqrt(si) *
(np.sin(50*si**0.20) + 1))**2
return value - 10
dimension = 12
var_lower = np.array([-50 for i in range(12)])
var_upper = np.array([50 for i in range(12)])
optimum_point = np.array([-34.32567, -34.98896, 07.69262, 30.3388,
-48.24371, 23.18355, 24.93374, 32.07436,
46.86153, 04.64872, 25.64591, -16.69128])
optimum_value = -10
var_type = np.array(['R'] * 12)
# -- end class
class schaeffer_f7_12_2:
"""
Schaeffer F7 function.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==12)
value = 0
normalizer = 1.0/float(len(x)-1)
for i in range(len(x)-1):
si = 3**i*np.sqrt((x[i]-cls.optimum_point[i])**2 +
(x[i+1]-cls.optimum_point[i+1])**2)
value += (normalizer * np.sqrt(si) *
(np.sin(50*si**0.20) + 1))**2
return value + 10
dimension = 12
var_lower = np.array([-50 for i in range(12)])
var_upper = np.array([50 for i in range(12)])
optimum_point = np.array([-08.214, 30.69133, 48.26095, -04.94219,
15.15357, 00.4841, -13.54025, -40.78766,
-16.02916, 16.42138, 39.30248, -49.56986])
optimum_value = 10
var_type = np.array(['R'] * 12)
# -- end class
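# A small self-check helper (a hypothetical sketch, not part of the original
# test set; it assumes numpy is imported as np at the top of this module, as
# the code above does). Every benchmark class exposes the same class-level
# interface -- evaluate(), dimension, var_lower, var_upper, optimum_point,
# optimum_value, var_type -- so declared optima can be verified generically.
def _check_optimum(cls, tol=1e-6):
    """Return True if evaluate() at optimum_point matches optimum_value."""
    x = np.asarray(cls.optimum_point, dtype=float)
    assert len(x) == cls.dimension
    return abs(cls.evaluate(x) - cls.optimum_value) <= tol
# -- end helper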
# After this point, all functions are MINLP: they include integer or
# categorical variables, and any constraints are folded into the objective
# as penalty terms.
class gear:
"""
gear function of the MINLPLib test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==4)
value = ((0.14427932477276 - x[0]*x[1]/(x[2]*x[3]))**2)
return(value)
dimension = 4
var_lower = np.array([12, 12, 12, 12])
var_upper = np.array([60, 60, 60, 60])
optimum_point = np.array([12.0, 23.0, 58.0, 33.0])
optimum_value = 0.0
var_type = np.array(['I'] * 4)
# -- end class
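# For instance, the sketch helper above can check gear's declared optimum;
# exact zero is not attained (the best known objective is only close to
# zero), hence a loose tolerance:
#   _check_optimum(gear, tol=1e-6)   # evaluates gear at [12, 23, 58, 33]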
class gear4:
"""
gear4 function of the MINLPLib test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==5)
value = -1000000*x[0]*x[1]/(x[2]*x[3]) + 2*x[4] + 144279.32477276
# There is a constraint:
# -1000000*x[0]*x[1]/(x[2]*x[3]) + x[4] + 144279.32477276 >= 0
penalty = 10*max(0,-(-1000000*x[0]*x[1]/(x[2]*x[3]) + x[4] +
144279.32477276))
return(value + penalty)
dimension = 5
var_lower = np.array([12, 12, 12, 12, 0])
var_upper = np.array([60, 60, 60, 60, 100])
optimum_point = np.array([19.0, 16.0, 43.0, 49.0, 1.64342847396619])
optimum_value = 1.6434284739
var_type = np.array(['I'] * 4 + ['R'])
# -- end class
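# The constrained classes here fold inequality constraints into the
# objective as penalties, as gear4 does above: a constraint written as
# g(x) >= 0 contributes weight*max(0, -g(x)), which is zero when the
# constraint holds and grows with the violation. A hypothetical helper
# expressing that pattern (a sketch, not part of the original test set):
def _penalty(g_value, weight=10):
    """Penalty for a constraint g(x) >= 0, given the value g(x)."""
    return weight * max(0, -g_value)
# -- end helper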
class nvs02:
"""
nvs02 function of the MINLPLib test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==5)
value = (0.0001*(5.3578547*np.sqrt(x[2]) + 0.8356891*x[0]*x[4] +
37.293239*x[0]) + 5.9207859)
# There are three constraints:
# 0 <= (0.0056858*x[1]*x[4] + 0.0006262*x[0]*x[3] -
# 0.0022053*x[2]*x[4] + 85.334407) <= 92
# 90 <= (0.0071317*x[1]*x[4] + 0.0029955*x[0]*x[1] +
        #     0.0021813*np.sqrt(x[2]) + 80.51249) <= 110
# 20 <= (0.0047026*x[2]*x[4] + 0.0012547*x[0]*x[2] +
# 0.0019085*x[2]*x[3] + 9.300961) <= 25
penalty = 0.0
penalty += 10*max(0, -(0.0056858*x[1]*x[4] + 0.0006262*x[0]*x[3] -
0.0022053*x[2]*x[4] + 85.334407))
penalty += 10*max(0, (0.0056858*x[1]*x[4] + 0.0006262*x[0]*x[3] -
0.0022053*x[2]*x[4] + 85.334407) - 92)
penalty += 10*max(0, -(0.0071317*x[1]*x[4] + 0.0029955*x[0]*x[1] +
0.0021813*np.sqrt(x[2]) + 80.51249) + 90)
penalty += 10*max(0, (0.0071317*x[1]*x[4] + 0.0029955*x[0]*x[1] +
0.0021813*np.sqrt(x[2]) + 80.51249) - 110)
penalty += 10*max(0, -(0.0047026*x[2]*x[4] + 0.0012547*x[0]*x[2] +
0.0019085*x[2]*x[3] + 9.300961) + 20)
penalty += 10*max(0, (0.0047026*x[2]*x[4] + 0.0012547*x[0]*x[2] +
0.0019085*x[2]*x[3] + 9.300961) - 25)
return(value + penalty)
dimension = 5
var_lower = np.array([0, 0, 0, 0, 0])
var_upper = np.array([200, 200, 200, 200, 200])
optimum_point = np.array([0.0, 9.0, 9.0, 200.0, 197.0])
optimum_value = 5.9223932564100004
var_type = np.array(['I'] * 5)
# -- end class
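# Two-sided constraints l <= h(x) <= u, as in nvs02 above, are handled with
# a pair of one-sided penalties: weight*max(0, l - h(x)) plus
# weight*max(0, h(x) - u). A hypothetical sketch of that pattern:
def _range_penalty(h_value, lower, upper, weight=10):
    """Penalty for a constraint lower <= h(x) <= upper, given h(x)."""
    return weight * (max(0, lower - h_value) + max(0, h_value - upper))
# -- end helper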
class nvs03:
"""
nvs03 function of the MINLPLib test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==2)
value = (-8 + x[0])**2 + (-2 + x[1])**2
# There are two constraints:
# -0.1*x[0]**2 + x[1] >= 0
# -0.333333333333333*x[0] - x[1] + 4.5 >= 0.0
penalty = 0.0
penalty += 100*max(0, -(-0.1*x[0]**2 + x[1]))
penalty += 100*max(0, -(-0.333333333333333*x[0] - x[1] + 4.5))
return(value + penalty)
dimension = 2
var_lower = np.array([0, 0])
var_upper = np.array([200, 200])
optimum_point = np.array([4.0, 2.0])
optimum_value = 16.0
var_type = np.array(['I'] * 2)
# -- end class
class nvs04:
"""
nvs04 function of the MINLPLib test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==2)
value = 100*(0.5 + x[1] - (0.6 + x[0])**2)**2 + (0.4 - x[0])**2
return(value)
dimension = 2
var_lower = np.array([0, 0])
var_upper = np.array([200, 200])
optimum_point = np.array([1.0, 2.0])
optimum_value = 0.72
var_type = np.array(['I'] * 2)
# -- end class
class nvs06:
"""
nvs06 function of the MINLPLib test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==2)
value = (0.1*((x[0])**2 + (1 + (x[1])**2)/(x[0])**2 +
(100 + ((x[0])**2)*(x[1])**2)/(x[0]*x[1])**4) + 1.2)
return(value)
dimension = 2
var_lower = np.array([1, 1])
var_upper = np.array([200, 200])
optimum_point = np.array([2.0, 2.0])
optimum_value = 1.7703125
var_type = np.array(['I'] * 2)
# -- end class
class nvs07:
"""
nvs07 function of the MINLPLib test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==3)
value = 2*x[1]**2 + x[0] + 5*x[2]
# There are two constraints:
# x[2]**2 * x[1] + 5*x[2] + 3*x[0] - 10 >= 0
# x[0] - x[2] - 2.66 >= 0
penalty = 0.0
penalty += 10*max(0, -(x[2]**2 * x[1] + 5*x[2] + 3*x[0] - 10))
penalty += 10*max(0, -(x[0] - x[2] - 2.66))
return(value + penalty)
dimension = 3
var_lower = np.array([0, 0, 0])
var_upper = np.array([200, 200, 200])
optimum_point = np.array([4.0, 0.0, 0.0])
optimum_value = 4.0
var_type = np.array(['I'] * 3)
# -- end class
class nvs09:
"""
nvs09 function of the MINLPLib test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==10)
value = ((np.log(x[0] - 2))**2 + (np.log(10 - x[0]))**2 +
(np.log(x[1] - 2))**2 + (np.log(10 - x[1]))**2 +
(np.log(x[2] - 2))**2 + (np.log(10 - x[2]))**2 +
(np.log(x[3] - 2))**2 + (np.log(10 - x[3]))**2 +
(np.log(x[4] - 2))**2 + (np.log(10 - x[4]))**2 +
(np.log(x[5] - 2))**2 + (np.log(10 - x[5]))**2 +
(np.log(x[6] - 2))**2 + (np.log(10 - x[6]))**2 +
(np.log(x[7] - 2))**2 + (np.log(10 - x[7]))**2 +
(np.log(x[8] - 2))**2 + (np.log(10 - x[8]))**2 +
(np.log(x[9] - 2))**2 + (np.log(10 - x[9]))**2 -
(x[0]*x[1]*x[2]*x[3]*x[4]*x[5]*x[6]*x[7]*x[8]*x[9])**0.2)
return(value)
dimension = 10
var_lower = np.array([3 for i in range(10)])
var_upper = np.array([9 for i in range(10)])
optimum_point = np.array([9, 9, 9, 9, 9, 9, 9, 9, 9, 9])
optimum_value = -43.134336918035
var_type = np.array(['I'] * 10)
# -- end class
class nvs14:
"""
nvs14 function of the MINLPLib test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==5)
value = (5.3578547*x[2]**2 + 0.8356891*x[0]*x[4] + 37.293239*x[0] -
40792.141)
# There are three constraints:
# 0 <= (0.0056858*x[1]*x[4] + 0.0006262*x[0]*x[3] -
# 0.0022053*x[2]*x[4] + 85.334407) <= 92
# 90 <= (0.0071317*x[1]*x[4] + 0.0029955*x[0]*x[1] +
# 0.0021813*x[2]**2 + 80.51249) <= 110
# 20 <= (0.0047026*x[2]*x[4] + 0.0012547*x[0]*x[2] +
# 0.0019085*x[2]*x[3] + 9.300961) <= 25
penalty = 0.0
penalty += 1000*max(0, -(0.0056858*x[1]*x[4] + 0.0006262*x[0]*x[3] -
0.0022053*x[2]*x[4] + 85.334407))
penalty += 1000*max(0, (0.0056858*x[1]*x[4] + 0.0006262*x[0]*x[3] -
0.0022053*x[2]*x[4] + 85.334407) - 92)
penalty += 1000*max(0, -(0.0071317*x[1]*x[4] + 0.0029955*x[0]*x[1] +
0.0021813*x[2]**2 + 80.51249) + 90)
penalty += 1000*max(0, (0.0071317*x[1]*x[4] + 0.0029955*x[0]*x[1] +
0.0021813*x[2]**2 + 80.51249) - 110)
penalty += 1000*max(0, -(0.0047026*x[2]*x[4] + 0.0012547*x[0]*x[2] +
0.0019085*x[2]*x[3] + 9.300961) + 20)
penalty += 1000*max(0, (0.0047026*x[2]*x[4] + 0.0012547*x[0]*x[2] +
0.0019085*x[2]*x[3] + 9.300961) - 25)
return(value + penalty)
dimension = 5
var_lower = np.array([0, 0, 0, 0, 0])
var_upper = np.array([200, 200, 200, 200, 200])
optimum_point = np.array([0.0, 7.0, 9.0, 175.0, 200.0])
optimum_value = -40358.1547693
var_type = np.array(['I'] * 5)
# -- end class
class nvs15:
"""
nvs15 function of the MINLPLib test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==3)
value = (2*x[0]**2 - 8*x[0] + 2*x[1]**2 - 6*x[1] + x[2]**2 - 4*x[2] +
2*x[0]*x[1] + 2*x[0]*x[2] + 9)
# There is one constraint:
# - x[0] - x[1] - 2*x[2] + 3 >= 0
penalty = 0.0
penalty += 10*max(0, -(-x[0] - x[1] - 2*x[2] + 3))
return(value + penalty)
dimension = 3
var_lower = np.array([0, 0, 0])
var_upper = np.array([200, 200, 200])
optimum_point = np.array([2.0, 0.0, 0.0])
optimum_value = 1.0
var_type = np.array(['I'] * 3)
# -- end class
class nvs16:
"""
nvs16 function of the MINLPLib test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==2)
value = ((1.5 - x[0]*(1 - x[1]))**2 +
(2.25 - x[0]*(1 - x[1]**2))**2 +
(2.625 - x[0]*(1 - x[1]**3))**2)
return(value)
dimension = 2
var_lower = np.array([0, 0])
var_upper = np.array([200, 200])
optimum_point = np.array([2.0, 0.0])
optimum_value = 0.703125
var_type = np.array(['I'] * 2)
# -- end class
class prob03:
"""
prob03 function of the MINLPLib test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==2)
value = 3*x[0] + 2*x[1]
# There is one constraint:
# x[0]*x[1] - 3.5 >= 0
penalty = 10*max(0, -(x[0]*x[1] - 3.5))
return(value + penalty)
dimension = 2
var_lower = np.array([1, 1])
var_upper = np.array([5, 5])
optimum_point = np.array([2.0, 2.0])
optimum_value = 10.0
var_type = np.array(['I'] * 2)
# -- end class
class sporttournament06:
"""
sporttournament06 function of the MINLPLib test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==15)
value = (2*x[0]*x[2] - 2*x[0] + 2*x[2] + 2*x[0]*x[6] - 2*x[6]
+ 2*x[1]*x[4] - 2*x[1] - 2*x[4] + 2*x[1]*x[9] -
4*x[9] - 2*x[2]*x[3] + 2*x[3] - 2*x[2]*x[11] -
2*x[2]*x[13] - 2*x[3]*x[4] + 2*x[3]*x[8] - 2*x[8] -
2*x[3]*x[14] + 2*x[4]*x[5] - 2*x[5] + 2*x[4]*x[7] -
                 2*x[7] + 2*x[5]*x[8] - 2*x[6]*x[7] + 2*x[6]*x[11] +
2*x[6]*x[12] + 2*x[7]*x[9] + 2*x[7]*x[14] +
                 2*x[8]*x[10] - 2*x[10] - 2*x[8]*x[11] + 2*x[9]*x[10]
+ 2*x[9]*x[11] - 2*x[12]*x[14] + 2*x[13]*x[14])
return(value)
dimension = 15
var_lower = np.array([0] * 15)
var_upper = np.array([1] * 15)
optimum_point = np.array([0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0,
0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0])
optimum_value = -12.0
var_type = np.array(['I'] * 15)
# -- end class
class st_miqp1:
"""
st_miqp1 function of the MINLPLib test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==5)
value = (50*x[0]*x[0] + 42*x[0] + 50*x[1]*x[1] + 44*x[1] +
50*x[2]*x[2] + 45*x[2] + 50*x[3]*x[3]
+ 47*x[3] + 50*x[4]*x[4] + 47.5*x[4])
# There is one constraint:
# 20*x[0] + 12*x[1] + 11*x[2] + 7*x[3] + 4*x[4] - 40 >= 0
penalty = 100*max(0, -(20*x[0] + 12*x[1] + 11*x[2] + 7*x[3] +
4*x[4] - 40))
return(value + penalty)
dimension = 5
var_lower = np.array([0, 0, 0, 0, 0])
var_upper = np.array([1, 1, 1, 1, 1])
optimum_point = np.array([1.0, 1.0, 1.0, 0.0, 0.0])
optimum_value = 281.0
var_type = np.array(['I'] * 5)
# -- end class
class st_miqp3:
"""
st_miqp3 function of the MINLPLib test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==2)
value = (6*x[0]*x[0] - 3*x[1])
# There is one constraint:
# 4*x[0] - x[1] >= 0
penalty = 10*max(0, -(4*x[0] - x[1]))
return(value + penalty)
dimension = 2
var_lower = np.array([0, 0])
var_upper = np.array([3, 50])
optimum_point = np.array([1.0, 4.0])
optimum_value = -6.0
var_type = np.array(['I'] * 2)
# -- end class
class st_test1:
"""
st_test1 function of the MINLPLib test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==5)
value = (50*x[0]*x[0] + 42*x[0] + 50*x[1]*x[1] - 44*x[1] +
50*x[3]*x[3] - 47*x[3] + 50*x[4]*x[4] - 47.5*x[4] + 45*x[2])
# There is one constraint:
# -20*x[0] - 12*x[1] - 11*x[2] - 7*x[3] - 4*x[4] + 40 >= 0
penalty = 10*max(0, -(-20*x[0] - 12*x[1] - 11*x[2] - 7*x[3] -
4*x[4] + 40))
return(value + penalty)
dimension = 5
var_lower = np.array([0, 0, 0, 0, 0])
var_upper = np.array([1, 1, 1, 1, 1])
optimum_point = np.array([0.0, 0.0, 0.0, 0.0, 0.0])
optimum_value = 0.0
var_type = np.array(['I'] * 5)
# -- end class
class schoen_6_1_int:
"""
schoen function of dimension 6 with 50 stationary points.
Mixed integer version.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==6)
numerator = 0.0
denominator = 0.0
dist = np.sum((x/10 - cls.z)**2, axis=1)
for i in range(50):
prod = 1.0
for j in range(50):
if (i != j):
prod *= dist[j]
numerator += cls.f[i]*prod
denominator += prod
value = numerator/denominator
return(value)
z = np.array(
[[0.298854, 0.181010, 0.984817, 0.125272, 0.548396, 0.894658],
[0.800371, 0.817380, 0.398577, 0.652349, 0.250843, 0.130235],
[0.268631, 0.929778, 0.640422, 0.462004, 0.492930, 0.434955],
[0.257863, 0.729198, 0.210810, 0.364378, 0.228216, 0.947432],
[0.767627, 0.592150, 0.103788, 0.696895, 0.472449, 0.244504],
[0.369630, 0.110889, 0.072344, 0.515753, 0.068087, 0.103057],
[0.425457, 0.807081, 0.491209, 0.449497, 0.065690, 0.592775],
[0.544229, 0.619841, 0.704609, 0.573098, 0.044844, 0.305800],
[0.164031, 0.722884, 0.670496, 0.517915, 0.176386, 0.921565],
[0.153788, 0.703577, 0.899129, 0.406134, 0.941356, 0.538215],
[0.984781, 0.510479, 0.573361, 0.884599, 0.399472, 0.712935],
[0.488416, 0.403997, 0.888823, 0.048434, 0.265197, 0.478025],
[0.047985, 0.280071, 0.709960, 0.278919, 0.035737, 0.037699],
[0.656172, 0.498412, 0.458622, 0.982970, 0.041234, 0.921127],
[0.590802, 0.359690, 0.396516, 0.338153, 0.320793, 0.847369],
[0.649160, 0.846974, 0.451818, 0.064864, 0.818545, 0.955844],
[0.583716, 0.669610, 0.463098, 0.492710, 0.989690, 0.002397],
[0.097300, 0.112389, 0.128759, 0.182995, 0.262808, 0.701887],
[0.487363, 0.892520, 0.269056, 0.116046, 0.905416, 0.808013],
[0.908316, 0.023997, 0.670399, 0.985859, 0.178548, 0.450410],
[0.230409, 0.381732, 0.613667, 0.697260, 0.016950, 0.736507],
[0.132544, 0.526349, 0.650042, 0.084086, 0.979257, 0.771499],
[0.872978, 0.008826, 0.587481, 0.624637, 0.623175, 0.939539],
[0.447828, 0.836386, 0.223285, 0.422756, 0.344488, 0.555953],
[0.546839, 0.153934, 0.953017, 0.640891, 0.666774, 0.647583],
[0.762237, 0.608920, 0.401447, 0.056202, 0.203535, 0.890609],
[0.655150, 0.444544, 0.495582, 0.247926, 0.155128, 0.188004],
[0.481813, 0.387178, 0.597276, 0.634671, 0.285404, 0.714793],
[0.976385, 0.018854, 0.262585, 0.640434, 0.086314, 0.669879],
[0.120164, 0.882300, 0.057626, 0.695111, 0.735135, 0.004711],
[0.414644, 0.715618, 0.642033, 0.770645, 0.407019, 0.502945],
[0.257475, 0.620029, 0.840603, 0.638546, 0.636521, 0.883558],
[0.788980, 0.374926, 0.448016, 0.081941, 0.225763, 0.944905],
[0.661591, 0.178832, 0.790349, 0.141653, 0.424235, 0.571960],
[0.546361, 0.624907, 0.190470, 0.412713, 0.124748, 0.662788],
[0.226384, 0.065829, 0.960836, 0.767766, 0.089695, 0.441792],
[0.303675, 0.370047, 0.973692, 0.830432, 0.424719, 0.173571],
[0.548375, 0.823234, 0.334253, 0.078398, 0.097269, 0.195120],
[0.646225, 0.100478, 0.723833, 0.891035, 0.386094, 0.360272],
[0.362757, 0.114700, 0.731020, 0.783785, 0.250399, 0.244399],
[0.904335, 0.869074, 0.479004, 0.525872, 0.359411, 0.338333],
[0.563175, 0.245903, 0.694417, 0.833524, 0.205055, 0.132535],
[0.401356, 0.920963, 0.401902, 0.120625, 0.765834, 0.381552],
[0.769562, 0.279591, 0.567598, 0.017192, 0.697366, 0.813451],
[0.738572, 0.984740, 0.007616, 0.005382, 0.592976, 0.771773],
[0.683721, 0.824097, 0.731623, 0.936945, 0.182420, 0.393537],
[0.375859, 0.541929, 0.974640, 0.377459, 0.754060, 0.019335],
[0.4, 0.6, 0.1, 0.4, 0.637412, 0.204038],
[0.5, 0.4, 0.4, 0.0, 0.198525, 0.074668],
[0.7, 0.1, 0.3, 0.5, 0.143614, 0.961610]])
f = np.array(
[672.2, 861.4, 520.9, 121.0, 11.5, 48.2, 702.4, 536.2,
457.7, 801.3, 787.7, 768.6, 292.4, 960.0, 573.1, 303.7,
283.3, 474.1, 216.9, 462.2, 853.6, 677.1, 464.6, 830.6,
831.8, 109.6, 967.6, 122.9, 896.2, 490.2, 710.4, 81.1,
802.9, 999.8, 945.5, 672.3, 712.9, 235.8, 266.5, 772.4,
326.6, 585.5, 16.9, 135.9, 224.2, 382.1, 614.6, -1000,
-1000, -1000])
dimension = 6
var_lower = np.array([0 for i in range(6)])
var_upper = np.array([10 for i in range(6)])
optimum_point = np.array([04., 06., 01., 04., 06.37412, 02.04038])
optimum_value = -1000
var_type = np.array(['I'] * 4 + ['R'] * 2)
# -- end class
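# Note on the mixed-integer schoen variants: evaluate() rescales x by 1/10,
# so integer coordinates in {0, ..., 10} land on a 0.1-spaced grid in the
# unit cube. The last three rows of z are rounded to one decimal in their
# integer coordinates so that the global optimum (value -1000) lies on that
# grid. A quick check (assuming np is in scope):
#   np.allclose(schoen_6_1_int.optimum_point / 10, schoen_6_1_int.z[47])
#   # -> True, and schoen_6_1_int.f[47] == -1000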
class schoen_6_2_int:
"""
schoen function of dimension 6 with 50 stationary points.
Mixed integer version.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==6)
numerator = 0.0
denominator = 0.0
dist = np.sum((x/10 - cls.z)**2, axis=1)
for i in range(50):
prod = 1.0
for j in range(50):
if (i != j):
prod *= dist[j]
numerator += cls.f[i]*prod
denominator += prod
value = numerator/denominator
return(value)
z = np.array(
[[0.669711, 0.815540, 0.646120, 0.377447, 0.111538, 0.040529],
[0.000632, 0.706804, 0.857031, 0.473778, 0.993569, 0.616184],
[0.625617, 0.880221, 0.534547, 0.760235, 0.276998, 0.735438],
[0.774577, 0.922914, 0.947791, 0.315328, 0.414841, 0.785803],
[0.079768, 0.131498, 0.225123, 0.464621, 0.638041, 0.992795],
[0.471038, 0.244503, 0.565776, 0.898397, 0.604639, 0.306230],
[0.642233, 0.482219, 0.034943, 0.934805, 0.972714, 0.153664],
[0.550151, 0.310507, 0.042126, 0.230722, 0.444375, 0.117355],
[0.789984, 0.488482, 0.065237, 0.842940, 0.793454, 0.799489],
[0.850183, 0.754551, 0.516033, 0.166362, 0.201966, 0.044234],
[0.000601, 0.896758, 0.304433, 0.149125, 0.178398, 0.871836],
[0.056787, 0.932745, 0.218009, 0.778061, 0.131847, 0.356237],
[0.210266, 0.221479, 0.014831, 0.200901, 0.656693, 0.891819],
[0.528515, 0.178025, 0.188138, 0.411485, 0.217833, 0.907579],
[0.195801, 0.663099, 0.477312, 0.395250, 0.655791, 0.820570],
[0.933208, 0.789323, 0.350520, 0.855434, 0.491082, 0.874993],
[0.251047, 0.543513, 0.529644, 0.218495, 0.351637, 0.608904],
[0.963286, 0.793004, 0.650148, 0.881362, 0.904832, 0.005397],
[0.431744, 0.438965, 0.044544, 0.834968, 0.330614, 0.451282],
[0.234845, 0.328576, 0.388284, 0.339183, 0.206086, 0.600034],
[0.512783, 0.961787, 0.959109, 0.632098, 0.910614, 0.912025],
[0.454168, 0.743189, 0.834284, 0.955817, 0.072172, 0.523068],
[0.696968, 0.720236, 0.341060, 0.054580, 0.045599, 0.549192],
[0.272955, 0.318845, 0.700767, 0.426325, 0.895755, 0.843128],
[0.992189, 0.332899, 0.272784, 0.019284, 0.073711, 0.434800],
[0.154276, 0.639611, 0.924641, 0.587242, 0.358453, 0.548022],
[0.021506, 0.450392, 0.515150, 0.032232, 0.650223, 0.849384],
[0.316499, 0.513234, 0.958219, 0.843587, 0.125408, 0.836643],
[0.538587, 0.261750, 0.732136, 0.030271, 0.893345, 0.270532],
[0.987469, 0.708780, 0.446487, 0.968784, 0.734448, 0.788229],
[0.353358, 0.135036, 0.249018, 0.565029, 0.740519, 0.250807],
[0.810372, 0.656510, 0.472093, 0.225741, 0.420513, 0.202519],
[0.848128, 0.551586, 0.513140, 0.956164, 0.483389, 0.404478],
[0.292239, 0.297077, 0.934202, 0.468329, 0.872274, 0.992632],
[0.828869, 0.534749, 0.716451, 0.405855, 0.164485, 0.531068],
[0.130616, 0.757677, 0.284500, 0.438300, 0.957643, 0.725899],
[0.503542, 0.640368, 0.381914, 0.847206, 0.134660, 0.762294],
[0.653851, 0.646544, 0.436036, 0.944225, 0.310369, 0.392362],
[0.539397, 0.027168, 0.697972, 0.209293, 0.992890, 0.008113],
[0.902045, 0.171034, 0.194924, 0.620057, 0.002203, 0.557433],
[0.802612, 0.085835, 0.380626, 0.492568, 0.238166, 0.961837],
[0.466993, 0.647847, 0.113397, 0.015357, 0.928904, 0.166425],
[0.892021, 0.869756, 0.681364, 0.129555, 0.394682, 0.745036],
[0.060675, 0.869904, 0.757236, 0.220765, 0.615988, 0.754288],
[0.031815, 0.340961, 0.455958, 0.529616, 0.840036, 0.365200],
[0.834595, 0.603639, 0.745330, 0.085080, 0.184636, 0.238718],
[0.575681, 0.250761, 0.874497, 0.870401, 0.854591, 0.968971],
[0.3, 0.7, 0.4, 0.1, 0.258563, 0.932004],
[0.2, 0.9, 0.7, 0.2, 0.375076, 0.154363],
[0.4, 0.4, 0.6, 0.9, 0.579466, 0.524775]])
f = np.array(
[109.6, 132.4, 558.2, 158.0, 6.2, 205.4, 593.9, 2.4,
399.8, 395.9, 212.6, 976.1, 104.4, 552.1, 436.3, 837.1,
283.7, 779.7, 392.1, 85.8, 885.1, 401.5, 367.5, 694.4,
691.6, 933.1, 590.7, 246.2, 370.0, 54.3, 719.4, 95.2,
276.0, 829.1, 613.6, 242.8, 424.6, 320.6, 666.1, 479.2,
420.0, 956.6, 241.0, 21.1, 169.8, 178.1, 394.4, -1000,
-1000, -1000])
dimension = 6
var_lower = np.array([0 for i in range(6)])
var_upper = np.array([10 for i in range(6)])
optimum_point = np.array([03., 07., 04., 01., 02.58563, 09.32004])
optimum_value = -1000
var_type = np.array(['I'] * 4 + ['R'] * 2)
# -- end class
class schoen_10_1_int:
"""
schoen function of dimension 10 with 50 stationary points.
Mixed integer version.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==10)
numerator = 0.0
denominator = 0.0
dist = np.sum((x/10 - cls.z)**2, axis=1)
for i in range(50):
prod = 1.0
for j in range(50):
if (i != j):
prod *= dist[j]
numerator += cls.f[i]*prod
denominator += prod
value = numerator/denominator
return(value)
z = np.array(
[[0.914871, 0.765230, 0.139426, 0.617466, 0.823635,
0.794003, 0.801171, 0.568811, 0.279434, 0.540422],
[0.976983, 0.593277, 0.701115, 0.585262, 0.669106,
0.272906, 0.177127, 0.143389, 0.561181, 0.018744],
[0.385208, 0.984106, 0.390066, 0.905970, 0.169600,
0.191291, 0.564157, 0.689910, 0.857031, 0.715390],
[0.975998, 0.536904, 0.819333, 0.801793, 0.564454,
0.336124, 0.654190, 0.044197, 0.717416, 0.465807],
[0.750519, 0.415284, 0.258927, 0.736115, 0.597744,
0.763716, 0.747691, 0.969633, 0.188117, 0.964954],
[0.412888, 0.671756, 0.380214, 0.558595, 0.768370,
0.998320, 0.212183, 0.606757, 0.531315, 0.303569],
[0.196682, 0.139879, 0.108608, 0.736975, 0.755971,
0.021390, 0.852398, 0.188596, 0.920133, 0.045012],
[0.956270, 0.729258, 0.397664, 0.013146, 0.519861,
0.300011, 0.008396, 0.820346, 0.176841, 0.402298],
[0.126432, 0.872346, 0.923581, 0.297492, 0.992744,
0.486525, 0.915493, 0.589980, 0.498242, 0.989945],
[0.697409, 0.026641, 0.875467, 0.503039, 0.563285,
0.096769, 0.933643, 0.884419, 0.585825, 0.395465],
[0.494783, 0.824300, 0.153326, 0.202651, 0.579815,
0.416954, 0.707624, 0.497959, 0.568876, 0.812841],
[0.126963, 0.757337, 0.648583, 0.787445, 0.822586,
0.401155, 0.301350, 0.562707, 0.744074, 0.088372],
[0.293611, 0.835864, 0.925111, 0.760322, 0.729456,
0.096840, 0.651466, 0.975836, 0.691353, 0.038384],
[0.999250, 0.916829, 0.205699, 0.027241, 0.156956,
0.206598, 0.175242, 0.811219, 0.660192, 0.119865],
[0.387978, 0.665180, 0.774376, 0.135223, 0.766238,
0.380668, 0.058279, 0.727506, 0.991527, 0.345759],
[0.299341, 0.066231, 0.680305, 0.392230, 0.319985,
0.698292, 0.100236, 0.394973, 0.096232, 0.362943],
[0.281548, 0.860858, 0.647870, 0.981650, 0.110777,
0.836484, 0.697387, 0.659942, 0.694425, 0.434991],
[0.606706, 0.052287, 0.858208, 0.738885, 0.158495,
0.002367, 0.933796, 0.112986, 0.647308, 0.421573],
[0.776505, 0.101364, 0.610406, 0.275033, 0.548409,
0.998967, 0.536743, 0.943903, 0.960993, 0.251672],
[0.371347, 0.491122, 0.772374, 0.860206, 0.752131,
0.338591, 0.826739, 0.312111, 0.768881, 0.862719],
[0.866886, 0.358220, 0.131205, 0.276334, 0.334111,
0.429525, 0.752197, 0.167524, 0.437764, 0.162916],
[0.584246, 0.511215, 0.659647, 0.349220, 0.954428,
0.477982, 0.386041, 0.813944, 0.753530, 0.983276],
[0.697327, 0.499835, 0.530487, 0.599958, 0.497257,
0.998852, 0.106262, 0.186978, 0.887481, 0.749174],
[0.041611, 0.278918, 0.999095, 0.825221, 0.218320,
0.383711, 0.077041, 0.642061, 0.668906, 0.758298],
[0.072437, 0.592862, 0.040655, 0.446330, 0.651659,
0.055738, 0.631924, 0.890039, 0.192989, 0.741054],
[0.533886, 0.135079, 0.787647, 0.593408, 0.749228,
0.749045, 0.190386, 0.755508, 0.465321, 0.465156],
[0.748843, 0.696419, 0.882124, 0.843895, 0.858057,
0.220107, 0.350310, 0.102947, 0.453576, 0.875940],
[0.560231, 0.580247, 0.381834, 0.807535, 0.184636,
0.615702, 0.628408, 0.081783, 0.793384, 0.233639],
[0.384827, 0.589138, 0.630013, 0.634506, 0.630712,
0.521293, 0.494486, 0.681700, 0.288512, 0.319808],
[0.721978, 0.452289, 0.426726, 0.323106, 0.781584,
0.999325, 0.043670, 0.884560, 0.520936, 0.430684],
[0.810388, 0.624041, 0.811624, 0.105973, 0.199807,
0.440644, 0.864152, 0.282280, 0.397116, 0.499932],
[0.973889, 0.677797, 0.080137, 0.549098, 0.625445,
0.577342, 0.538642, 0.388039, 0.552273, 0.793807],
[0.365176, 0.228017, 0.623500, 0.084450, 0.177343,
0.910108, 0.632719, 0.521458, 0.894843, 0.707893],
[0.502069, 0.622312, 0.958019, 0.744999, 0.515695,
0.407885, 0.590739, 0.736542, 0.297555, 0.237955],
[0.313835, 0.090014, 0.336274, 0.433171, 0.330864,
0.105751, 0.160367, 0.651934, 0.207260, 0.293577],
[0.886072, 0.592935, 0.498116, 0.321835, 0.011216,
0.543911, 0.506579, 0.216779, 0.406812, 0.261349],
[0.789947, 0.881332, 0.696597, 0.742955, 0.252224,
0.718157, 0.188217, 0.371208, 0.178640, 0.347720],
[0.482759, 0.663618, 0.622706, 0.036170, 0.278854,
0.088147, 0.482808, 0.134824, 0.028828, 0.944537],
[0.184705, 0.662346, 0.917194, 0.186490, 0.918392,
0.955111, 0.636015, 0.447595, 0.813716, 0.372839],
[0.231741, 0.637199, 0.745257, 0.201568, 0.697485,
0.897022, 0.239791, 0.495219, 0.153831, 0.387172],
[0.198061, 0.194102, 0.550259, 0.751804, 0.503973,
0.034252, 0.788267, 0.731760, 0.118338, 0.057247],
[0.068470, 0.545180, 0.668845, 0.714932, 0.688014,
0.203845, 0.146138, 0.109039, 0.470214, 0.441797],
[0.085180, 0.142394, 0.938665, 0.071422, 0.946796,
0.697832, 0.472400, 0.161384, 0.325715, 0.122550],
[0.637672, 0.986961, 0.969438, 0.989508, 0.381318,
0.800871, 0.012035, 0.326007, 0.459124, 0.645374],
[0.147210, 0.954608, 0.361146, 0.094699, 0.092327,
0.301664, 0.478447, 0.008274, 0.680576, 0.004184],
[0.768792, 0.812618, 0.915766, 0.029070, 0.506944,
0.457816, 0.839167, 0.024706, 0.990756, 0.088779],
[0.872678, 0.601536, 0.948347, 0.621023, 0.415621,
0.289340, 0.291338, 0.190461, 0.664007, 0.583513],
[0.6, 0.7, 0.0, 0.355500, 0.294700,
0.3, 0.5, 0.5, 0.759223, 0.508432],
[0.7, 0.0, 0.4, 0.300586, 0.576927,
0.1, 0.2, 0.9, 0.614178, 0.092903],
[0.7, 0.3, 0.7, 0.899419, 0.749499,
0.6, 0.6, 0.0, 0.973726, 0.168336]])
f = np.array(
[799.1, 396.8, 370.3, 400.2, 239.7, 678.8, 868.9, 564.4,
681.6, 153.0, 760.7, 562.9, 434.9, 579.2, 260.6, 88.5,
601.3, 754.8, 894.8, 672.8, 633.7, 921.8, 43.2, 286.2,
945.5, 716.0, 72.7, 631.2, 640.3, 425.1, 825.8, 555.8,
136.9, 805.7, 786.5, 400.0, 856.4, 548.0, 510.8, 52.3,
111.6, 686.6, 888.2, 315.4, 333.9, 61.5, 755.2, -1000,
-1000, -1000])
dimension = 10
var_lower = np.array([0 for i in range(10)])
var_upper = np.array([10 for i in range(10)])
optimum_point = np.array([06., 07., 00., 03.55500, 02.94700,
03., 05., 05., 07.59223, 05.08432])
optimum_value = -1000
var_type = np.array(['I'] * 3 + ['R'] * 2 + ['I'] * 3 + ['R'] * 2)
# -- end class
class schoen_10_2_int:
"""
schoen function of dimension 10 with 50 stationary points.
Mixed integer version.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==10)
numerator = 0.0
denominator = 0.0
dist = np.sum((x/10 - cls.z)**2, axis=1)
for i in range(50):
prod = 1.0
for j in range(50):
if (i != j):
prod *= dist[j]
numerator += cls.f[i]*prod
denominator += prod
value = numerator/denominator
return(value)
z = np.array(
[[0.131461, 0.965235, 0.046134, 0.983011, 0.719813,
0.827542, 0.662422, 0.570546, 0.578707, 0.013264],
[0.068454, 0.682785, 0.582736, 0.434517, 0.310613,
0.869876, 0.993949, 0.629156, 0.590599, 0.356378],
[0.632837, 0.961665, 0.015079, 0.378878, 0.805608,
0.685239, 0.528658, 0.752934, 0.717790, 0.374865],
[0.286191, 0.912944, 0.400358, 0.902532, 0.324887,
0.850063, 0.483503, 0.764147, 0.147726, 0.159851],
[0.303483, 0.754790, 0.090527, 0.653764, 0.164323,
0.402931, 0.593477, 0.448444, 0.711483, 0.113869],
[0.057398, 0.302029, 0.596351, 0.565466, 0.694204,
0.974864, 0.323989, 0.298493, 0.859391, 0.238714],
[0.139267, 0.214902, 0.608462, 0.297987, 0.499810,
0.578553, 0.548077, 0.208442, 0.046162, 0.246848],
[0.680420, 0.783181, 0.828103, 0.475810, 0.680401,
0.188455, 0.015200, 0.650103, 0.762389, 0.063985],
[0.409243, 0.600740, 0.302354, 0.588411, 0.436291,
0.294790, 0.701477, 0.994162, 0.433749, 0.535320],
[0.077949, 0.530126, 0.869737, 0.387811, 0.705317,
0.632911, 0.442087, 0.082918, 0.441383, 0.591975],
[0.622628, 0.054964, 0.020475, 0.145616, 0.163873,
0.321546, 0.282867, 0.743494, 0.750568, 0.732386],
[0.538574, 0.066932, 0.225204, 0.290045, 0.613242,
0.529365, 0.384018, 0.946557, 0.974384, 0.425297],
[0.108817, 0.850094, 0.886417, 0.161581, 0.082973,
0.506354, 0.589650, 0.638991, 0.045151, 0.688464],
[0.917742, 0.365119, 0.484176, 0.173231, 0.210253,
0.303688, 0.992141, 0.023109, 0.977178, 0.535146],
[0.183469, 0.198085, 0.511596, 0.275610, 0.753700,
0.437328, 0.986237, 0.028654, 0.767921, 0.997910],
[0.484908, 0.759122, 0.577318, 0.359934, 0.935730,
0.617833, 0.770173, 0.311175, 0.004831, 0.157457],
[0.634077, 0.236972, 0.016427, 0.261753, 0.349712,
0.245870, 0.412238, 0.523557, 0.985327, 0.094060],
[0.477875, 0.803438, 0.496728, 0.848920, 0.497386,
0.938203, 0.279797, 0.287076, 0.395184, 0.980546],
[0.450215, 0.193712, 0.975838, 0.103925, 0.077410,
0.709573, 0.253072, 0.311723, 0.885664, 0.204528],
[0.557312, 0.815198, 0.097914, 0.539142, 0.826048,
0.130070, 0.049858, 0.223634, 0.076387, 0.831224],
[0.927559, 0.324916, 0.563393, 0.209281, 0.344394,
0.953384, 0.298679, 0.890637, 0.966615, 0.380006],
[0.026403, 0.997573, 0.479163, 0.379686, 0.687928,
0.832002, 0.214326, 0.348248, 0.073151, 0.062646],
[0.726869, 0.911171, 0.961920, 0.874884, 0.216867,
0.076966, 0.776240, 0.495777, 0.963492, 0.425246],
[0.357483, 0.486330, 0.759177, 0.748362, 0.889904,
0.350438, 0.232983, 0.823613, 0.792656, 0.441264],
[0.875826, 0.359459, 0.214808, 0.425850, 0.493328,
0.456048, 0.523145, 0.504154, 0.090128, 0.472437],
[0.813400, 0.808407, 0.427211, 0.902524, 0.210376,
0.490662, 0.915939, 0.169439, 0.078865, 0.485371],
[0.877334, 0.982207, 0.679085, 0.486335, 0.940715,
0.585964, 0.289279, 0.694886, 0.172625, 0.201457],
[0.141599, 0.476124, 0.762246, 0.067045, 0.411332,
0.813196, 0.134138, 0.302390, 0.856145, 0.349243],
[0.346912, 0.082142, 0.787442, 0.857465, 0.371129,
0.448550, 0.967943, 0.775340, 0.943681, 0.656127],
[0.619267, 0.547196, 0.470422, 0.141566, 0.584198,
0.952226, 0.196462, 0.629549, 0.685469, 0.824365],
[0.014209, 0.789812, 0.836373, 0.186139, 0.493840,
0.710697, 0.910033, 0.368287, 0.865953, 0.140892],
[0.482763, 0.072574, 0.026730, 0.143687, 0.739505,
0.419649, 0.013683, 0.662644, 0.785254, 0.234561],
[0.821421, 0.844100, 0.153937, 0.671762, 0.290469,
0.631347, 0.591435, 0.498966, 0.043395, 0.176771],
[0.404994, 0.496656, 0.951774, 0.497357, 0.715401,
0.023378, 0.493045, 0.342766, 0.117055, 0.698590],
[0.985857, 0.831692, 0.423498, 0.215757, 0.341260,
0.790760, 0.941186, 0.716883, 0.062641, 0.582012],
[0.676905, 0.280897, 0.800638, 0.898913, 0.735995,
0.592412, 0.433021, 0.432772, 0.874477, 0.112375],
[0.377382, 0.118941, 0.529204, 0.419434, 0.673891,
0.074904, 0.129868, 0.819585, 0.220536, 0.353223],
[0.233415, 0.136703, 0.487256, 0.777498, 0.901915,
0.612402, 0.778635, 0.436718, 0.484520, 0.641969],
[0.273297, 0.670196, 0.344525, 0.669751, 0.180230,
0.530085, 0.393284, 0.326043, 0.260840, 0.364690],
[0.931213, 0.676123, 0.912481, 0.898258, 0.001887,
0.408306, 0.917215, 0.496959, 0.287951, 0.562511],
[0.047196, 0.780338, 0.895994, 0.088169, 0.552425,
0.130790, 0.308504, 0.232476, 0.187952, 0.105936],
[0.343517, 0.356222, 0.416018, 0.450278, 0.487765,
0.040510, 0.592363, 0.771635, 0.577849, 0.315843],
[0.527759, 0.529503, 0.210423, 0.756794, 0.892670,
0.339374, 0.445837, 0.363265, 0.432114, 0.942045],
[0.560107, 0.110906, 0.115725, 0.761393, 0.969105,
0.921166, 0.455014, 0.593512, 0.111887, 0.217300],
[0.463382, 0.635591, 0.329484, 0.573602, 0.492558,
0.474174, 0.371906, 0.850465, 0.467637, 0.261373],
[0.033051, 0.422543, 0.294155, 0.699026, 0.846231,
0.047967, 0.686826, 0.480273, 0.463181, 0.345601],
[0.285473, 0.723925, 0.202386, 0.671909, 0.685277,
0.993969, 0.415329, 0.155218, 0.233826, 0.088752],
[0.0, 0.6, 0.8, 0.677718, 0.961189,
0.2, 0.8, 0.8, 0.524970, 0.815489],
[0.5, 0.5, 0.1, 0.156163, 0.274566,
0.5, 0.8, 0.8, 0.656166, 0.964211],
[0.1, 0.9, 0.0, 0.178217, 0.408438,
0.2, 0.1, 0.2, 0.051758, 0.906712]])
f = np.array(
[90.4, 830.9, 52.7, 375.2, 289.7, 244.1, 470.2, 111.7,
968.9, 903.4, 918.5, 820.3, 441.2, 687.5, 836.9, 11.0,
454.5, 929.3, 952.6, 937.2, 870.5, 211.7, 378.4, 320.3,
729.6, 420.8, 213.8, 717.7, 285.4, 522.8, 748.3, 371.0,
501.2, 568.6, 111.9, 645.2, 486.2, 157.0, 968.5, 137.6,
127.2, 943.4, 437.2, 199.7, 415.4, 966.0, 362.3, -1000,
-1000, -1000])
dimension = 10
var_lower = np.array([0 for i in range(10)])
var_upper = np.array([10 for i in range(10)])
optimum_point = np.array([00., 06., 08., 06.77718, 09.61189,
02., 08., 08., 05.24970, 08.15489])
optimum_value = -1000
var_type = np.array(['I'] * 3 + ['R'] * 2 + ['I'] * 3 + ['R'] * 2)
# -- end class
class branin_cat:
"""
Branin function of the Dixon-Szego test set, with categorical vars.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==3)
if (x[2] == 0):
fun = lambda x : np.cos(x)
elif (x[2] == 1):
fun = lambda x : np.sin(x)
elif (x[2] == 2):
fun = lambda x : (np.cos(x + np.pi/4))**2
elif (x[2] == 3):
fun = lambda x : (np.sin(x + np.pi/4))**2
value = ((x[1] - (5.1/(4*np.pi*np.pi))*x[0]*x[0] +
5/np.pi*x[0] - 6)**2 + 10*(1-1/(8*np.pi)) *
                  fun(x[0]) + 10)
return(value)
dimension = 3
var_lower = np.array([-5, 0, 0])
var_upper = np.array([10, 15, 3])
optimum_point = np.array([9.42477796, 2.47499998, 0])
additional_optima = np.array([ [-3.14159265, 12.27500000, 0],
[3.14159265, 2.27500000, 0] ])
optimum_value = 0.397887357729739
var_type = np.array(['R', 'R', 'C'])
# -- end class
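# With the categorical variable x[2] == 0 the cosine branch is selected and
# branin_cat reduces to the classical Branin function, so the minimizer and
# the two additional optima listed above all attain the same value.
# A quick check (sketch):
#   branin_cat.evaluate(branin_cat.optimum_point)
#   # -> approximately 0.397887357729739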
class hartman3_cat:
"""
Hartman3 function of the Dixon-Szego test set, with categorical vars.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==4)
value = -math.fsum([cls.c[int(x[3]), i] *
np.exp(-math.fsum([cls.a[j][i]*
(x[j] - cls.p[j][i])**2
for j in range(3)]))
for i in range(4) ])
return(value)
a = np.array([ [3.0, 0.1, 3.0, 0.1],
[10.0, 10.0, 10.0, 10.0],
[30.0, 35.0, 30.0, 35.0] ])
p = np.array([ [0.36890, 0.46990, 0.10910, 0.03815],
[0.11700, 0.43870, 0.87320, 0.57430],
[0.26730, 0.74700, 0.55470, 0.88280] ])
c = np.array([[3.2, 2.5, 0.2, 0.7],
[1.0, 1.2, 3.0, 3.2],
[2.2, 1.2, 3.1, 1.7],
[0.1, 2.1, 0.3, 3.7],
[1.8, 0.4, 3.1, 2.4]])
dimension = 4
var_lower = np.array([0, 0, 0, 0])
var_upper = np.array([1, 1, 1, 4])
optimum_point = np.array([0.155995, 0.536521, 0.843994, 3])
optimum_value = -4.822787424687719
var_type = np.array(['R', 'R', 'R', 'C'])
# -- end class
class hartman6_cat:
"""
Hartman6 function of the Dixon-Szego test set, with categorical vars.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==7)
value = -math.fsum([cls.c[int(x[0]), i] *
np.exp(-math.fsum([cls.a[j][i]*
(x[j+1] - cls.p[j][i])**2
for j in range(6)]))
for i in range(4) ])
return(value)
a = np.array([ [10.00, 0.05, 3.00, 17.00],
[3.00, 10.00, 3.50, 8.00],
[17.00, 17.00, 1.70, 0.05],
[3.50, 0.10, 10.00, 10.00],
[1.70, 8.00, 17.00, 0.10],
[8.00, 14.00, 8.00, 14.00] ])
p = np.array([ [0.1312, 0.2329, 0.2348, 0.4047],
[0.1696, 0.4135, 0.1451, 0.8828],
[0.5569, 0.8307, 0.3522, 0.8732],
[0.0124, 0.3736, 0.2883, 0.5743],
[0.8283, 0.1004, 0.3047, 0.1091],
[0.5886, 0.9991, 0.6650, 0.0381] ])
c = np.array([[1.0, 1.2, 3.0, 3.2],
[3.2, 2.5, 0.2, 0.7],
[2.2, 1.2, 3.1, 1.7],
[0.1, 2.1, 0.3, 3.7],
[1.8, 0.4, 3.1, 2.4]])
dimension = 7
var_lower = np.array([0, 0, 0, 0, 0, 0, 0])
var_upper = np.array([4, 1, 1, 1, 1, 1, 1])
optimum_point = np.array([2, 0.177401, 0.153512, 0.516698,
0.256499, 0.323092, 0.646352])
optimum_value = -3.96231691936822
var_type = np.array(['C'] + ['R'] * 6)
# -- end class
class ex8_1_1_cat:
"""
ex8_1_1 function of the GlobalLib test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==4)
if (x[2] == 0):
fun1 = lambda x : np.sin(x)
elif (x[2] == 1):
fun1 = lambda x : np.cos(x)
elif (x[2] == 2):
fun1 = lambda x : (np.cos(x + np.pi/4))**2
elif (x[2] == 3):
fun1 = lambda x : (np.sin(x + np.pi/4))**2
if (x[3] == 0):
fun2 = lambda x : (np.sin(x + np.pi/4))**2
elif (x[3] == 1):
fun2 = lambda x : np.sin(x)
elif (x[3] == 2):
fun2 = lambda x : (np.cos(x + np.pi/4))**2
elif (x[3] == 3):
fun2 = lambda x : np.cos(x)
value = fun1(x[0])*fun2(x[1]) - x[0]/(x[1]**2+1)
return(value)
dimension = 4
var_lower = np.array([-1, -1, 0, 0])
var_upper = np.array([2, 1, 3, 3])
optimum_point = np.array([2.0, -0.00030, 1, 3])
optimum_value = -2.4161466378205514
var_type = np.array(['R'] * 2 + ['C', 'C'])
# -- end class
class schoen_10_1_cat:
"""
schoen function of dimension 10 with categorical variables.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==12)
numerator = 0.0
denominator = 0.0
        # Categorical variables selecting the function applied to the
        # coordinate differences: x[0] for the first 25 points, x[1]
        # for the remaining 25. These functions should be nonnegative
        # on distances.
if (x[0] == 0):
fun1 = lambda x : x**2
elif (x[0] == 1):
fun1 = lambda x : np.abs(x + 50)
elif (x[0] == 2):
fun1 = lambda x : np.log(x + 10)
elif (x[0] == 3):
fun1 = lambda x : (np.sin(x))**2
if (x[1] == 0):
fun2 = lambda x : np.log(x + 10)
elif (x[1] == 1):
fun2 = lambda x : np.abs(x + 50)
elif (x[1] == 2):
fun2 = lambda x : x**2
elif (x[1] == 3):
fun2 = lambda x : (np.sin(x))**2
dist1 = np.sum(fun1(x[2:] - cls.z), axis=1)
dist2 = np.sum(fun2(x[2:] - cls.z), axis=1)
for i in range(25):
prod = 1.0
for j in range(50):
if (i != j):
prod *= dist1[j]
numerator += cls.f[i]*prod
denominator += prod
for i in range(25, 50):
prod = 1.0
for j in range(50):
if (i != j):
prod *= dist2[j]
numerator += cls.f[i]*prod
denominator += prod
value = numerator/denominator
return(value)
z = np.array(
[[0.914871, 0.765230, 0.139426, 0.617466, 0.823635,
0.794003, 0.801171, 0.568811, 0.279434, 0.540422],
[0.976983, 0.593277, 0.701115, 0.585262, 0.669106,
0.272906, 0.177127, 0.143389, 0.561181, 0.018744],
[0.385208, 0.984106, 0.390066, 0.905970, 0.169600,
0.191291, 0.564157, 0.689910, 0.857031, 0.715390],
[0.975998, 0.536904, 0.819333, 0.801793, 0.564454,
0.336124, 0.654190, 0.044197, 0.717416, 0.465807],
[0.750519, 0.415284, 0.258927, 0.736115, 0.597744,
0.763716, 0.747691, 0.969633, 0.188117, 0.964954],
[0.412888, 0.671756, 0.380214, 0.558595, 0.768370,
0.998320, 0.212183, 0.606757, 0.531315, 0.303569],
[0.196682, 0.139879, 0.108608, 0.736975, 0.755971,
0.021390, 0.852398, 0.188596, 0.920133, 0.045012],
[0.956270, 0.729258, 0.397664, 0.013146, 0.519861,
0.300011, 0.008396, 0.820346, 0.176841, 0.402298],
[0.126432, 0.872346, 0.923581, 0.297492, 0.992744,
0.486525, 0.915493, 0.589980, 0.498242, 0.989945],
[0.697409, 0.026641, 0.875467, 0.503039, 0.563285,
0.096769, 0.933643, 0.884419, 0.585825, 0.395465],
[0.494783, 0.824300, 0.153326, 0.202651, 0.579815,
0.416954, 0.707624, 0.497959, 0.568876, 0.812841],
[0.126963, 0.757337, 0.648583, 0.787445, 0.822586,
0.401155, 0.301350, 0.562707, 0.744074, 0.088372],
[0.293611, 0.835864, 0.925111, 0.760322, 0.729456,
0.096840, 0.651466, 0.975836, 0.691353, 0.038384],
[0.999250, 0.916829, 0.205699, 0.027241, 0.156956,
0.206598, 0.175242, 0.811219, 0.660192, 0.119865],
[0.387978, 0.665180, 0.774376, 0.135223, 0.766238,
0.380668, 0.058279, 0.727506, 0.991527, 0.345759],
[0.299341, 0.066231, 0.680305, 0.392230, 0.319985,
0.698292, 0.100236, 0.394973, 0.096232, 0.362943],
[0.281548, 0.860858, 0.647870, 0.981650, 0.110777,
0.836484, 0.697387, 0.659942, 0.694425, 0.434991],
[0.606706, 0.052287, 0.858208, 0.738885, 0.158495,
0.002367, 0.933796, 0.112986, 0.647308, 0.421573],
[0.776505, 0.101364, 0.610406, 0.275033, 0.548409,
0.998967, 0.536743, 0.943903, 0.960993, 0.251672],
[0.371347, 0.491122, 0.772374, 0.860206, 0.752131,
0.338591, 0.826739, 0.312111, 0.768881, 0.862719],
[0.866886, 0.358220, 0.131205, 0.276334, 0.334111,
0.429525, 0.752197, 0.167524, 0.437764, 0.162916],
[0.584246, 0.511215, 0.659647, 0.349220, 0.954428,
0.477982, 0.386041, 0.813944, 0.753530, 0.983276],
[0.697327, 0.499835, 0.530487, 0.599958, 0.497257,
0.998852, 0.106262, 0.186978, 0.887481, 0.749174],
[0.041611, 0.278918, 0.999095, 0.825221, 0.218320,
0.383711, 0.077041, 0.642061, 0.668906, 0.758298],
[0.072437, 0.592862, 0.040655, 0.446330, 0.651659,
0.055738, 0.631924, 0.890039, 0.192989, 0.741054],
[0.533886, 0.135079, 0.787647, 0.593408, 0.749228,
0.749045, 0.190386, 0.755508, 0.465321, 0.465156],
[0.748843, 0.696419, 0.882124, 0.843895, 0.858057,
0.220107, 0.350310, 0.102947, 0.453576, 0.875940],
[0.560231, 0.580247, 0.381834, 0.807535, 0.184636,
0.615702, 0.628408, 0.081783, 0.793384, 0.233639],
[0.384827, 0.589138, 0.630013, 0.634506, 0.630712,
0.521293, 0.494486, 0.681700, 0.288512, 0.319808],
[0.721978, 0.452289, 0.426726, 0.323106, 0.781584,
0.999325, 0.043670, 0.884560, 0.520936, 0.430684],
[0.810388, 0.624041, 0.811624, 0.105973, 0.199807,
0.440644, 0.864152, 0.282280, 0.397116, 0.499932],
[0.973889, 0.677797, 0.080137, 0.549098, 0.625445,
0.577342, 0.538642, 0.388039, 0.552273, 0.793807],
[0.365176, 0.228017, 0.623500, 0.084450, 0.177343,
0.910108, 0.632719, 0.521458, 0.894843, 0.707893],
[0.502069, 0.622312, 0.958019, 0.744999, 0.515695,
0.407885, 0.590739, 0.736542, 0.297555, 0.237955],
[0.313835, 0.090014, 0.336274, 0.433171, 0.330864,
0.105751, 0.160367, 0.651934, 0.207260, 0.293577],
[0.886072, 0.592935, 0.498116, 0.321835, 0.011216,
0.543911, 0.506579, 0.216779, 0.406812, 0.261349],
[0.789947, 0.881332, 0.696597, 0.742955, 0.252224,
0.718157, 0.188217, 0.371208, 0.178640, 0.347720],
[0.482759, 0.663618, 0.622706, 0.036170, 0.278854,
0.088147, 0.482808, 0.134824, 0.028828, 0.944537],
[0.184705, 0.662346, 0.917194, 0.186490, 0.918392,
0.955111, 0.636015, 0.447595, 0.813716, 0.372839],
[0.231741, 0.637199, 0.745257, 0.201568, 0.697485,
0.897022, 0.239791, 0.495219, 0.153831, 0.387172],
[0.198061, 0.194102, 0.550259, 0.751804, 0.503973,
0.034252, 0.788267, 0.731760, 0.118338, 0.057247],
[0.068470, 0.545180, 0.668845, 0.714932, 0.688014,
0.203845, 0.146138, 0.109039, 0.470214, 0.441797],
[0.085180, 0.142394, 0.938665, 0.071422, 0.946796,
0.697832, 0.472400, 0.161384, 0.325715, 0.122550],
[0.637672, 0.986961, 0.969438, 0.989508, 0.381318,
0.800871, 0.012035, 0.326007, 0.459124, 0.645374],
[0.147210, 0.954608, 0.361146, 0.094699, 0.092327,
0.301664, 0.478447, 0.008274, 0.680576, 0.004184],
[0.768792, 0.812618, 0.915766, 0.029070, 0.506944,
0.457816, 0.839167, 0.024706, 0.990756, 0.088779],
[0.872678, 0.601536, 0.948347, 0.621023, 0.415621,
0.289340, 0.291338, 0.190461, 0.664007, 0.583513],
[0.641216, 0.700152, 0.080576, 0.355500, 0.294700,
0.338614, 0.563964, 0.528079, 0.759223, 0.508432],
[0.738489, 0.077376, 0.429485, 0.300586, 0.576927,
0.185931, 0.231659, 0.954833, 0.614178, 0.092903],
[0.729321, 0.318607, 0.768657, 0.899419, 0.749499,
0.623403, 0.671793, 0.052835, 0.973726, 0.168336]])
f = np.array(
[-1000, -1000, -1000, 799.1, 396.8, 370.3, 400.2, 239.7,
678.8, 868.9, 564.4, 681.6, 153.0, 760.7, 562.9, 434.9,
579.2, 260.6, 88.5, 601.3, 754.8, 894.8, 672.8, 633.7, 921.8,
43.2, 286.2, 945.5, 716.0, 72.7, 631.2, 640.3, 425.1, 825.8,
555.8, 136.9, 805.7, 786.5, 400.0, 856.4, 548.0, 510.8, 52.3,
111.6, 686.6, 888.2, 315.4, 333.9, 61.5, 755.2])
dimension = 12
var_lower = np.array([0, 0] + [0 for i in range(10)])
var_upper = np.array([3, 3] + [1 for i in range(10)])
optimum_point = np.array([0, 2, 0.914871, 0.765230, 0.139426, 0.617466,
0.823635, 0.794003, 0.801171, 0.568811,
0.279434, 0.540422])
optimum_value = -1000
var_type = np.array(['C', 'C'] + ['R'] * 10)
# -- end class
class schoen_10_2_cat:
"""
schoen function of dimension 10 with categorical variables.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==12)
numerator = 0.0
denominator = 0.0
# Categorical variable determining function for the first 25
# points. We want these functions to be nonnegative on
# distances.
if (x[0] == 0):
fun1 = lambda x : x**2
elif (x[0] == 1):
fun1 = lambda x : np.abs(x + 50)
elif (x[0] == 2):
fun1 = lambda x : np.log(x + 10)
elif (x[0] == 3):
fun1 = lambda x : (np.sin(x))**2
if (x[1] == 0):
fun2 = lambda x : np.log(x + 10)
elif (x[1] == 1):
fun2 = lambda x : np.abs(x + 50)
elif (x[1] == 2):
fun2 = lambda x : x**2
elif (x[1] == 3):
fun2 = lambda x : (np.sin(x))**2
dist1 = np.sum(fun1(x[2:] - cls.z), axis=1)
dist2 = np.sum(fun2(x[2:] - cls.z), axis=1)
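        # Schoen-type rational interpolant: node z_i contributes its value
        # f_i weighted by the product of the transformed distances to all
        # other nodes, so the function attains f_i exactly at z_i.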
for i in range(25):
prod = 1.0
for j in range(50):
if (i != j):
prod *= dist1[j]
numerator += cls.f[i]*prod
denominator += prod
for i in range(25, 50):
prod = 1.0
for j in range(50):
if (i != j):
prod *= dist2[j]
numerator += cls.f[i]*prod
denominator += prod
value = numerator/denominator
return(value)
z = np.array(
[[0.131461, 0.965235, 0.046134, 0.983011, 0.719813,
0.827542, 0.662422, 0.570546, 0.578707, 0.013264],
[0.068454, 0.682785, 0.582736, 0.434517, 0.310613,
0.869876, 0.993949, 0.629156, 0.590599, 0.356378],
[0.632837, 0.961665, 0.015079, 0.378878, 0.805608,
0.685239, 0.528658, 0.752934, 0.717790, 0.374865],
[0.286191, 0.912944, 0.400358, 0.902532, 0.324887,
0.850063, 0.483503, 0.764147, 0.147726, 0.159851],
[0.303483, 0.754790, 0.090527, 0.653764, 0.164323,
0.402931, 0.593477, 0.448444, 0.711483, 0.113869],
[0.057398, 0.302029, 0.596351, 0.565466, 0.694204,
0.974864, 0.323989, 0.298493, 0.859391, 0.238714],
[0.139267, 0.214902, 0.608462, 0.297987, 0.499810,
0.578553, 0.548077, 0.208442, 0.046162, 0.246848],
[0.680420, 0.783181, 0.828103, 0.475810, 0.680401,
0.188455, 0.015200, 0.650103, 0.762389, 0.063985],
[0.409243, 0.600740, 0.302354, 0.588411, 0.436291,
0.294790, 0.701477, 0.994162, 0.433749, 0.535320],
[0.077949, 0.530126, 0.869737, 0.387811, 0.705317,
0.632911, 0.442087, 0.082918, 0.441383, 0.591975],
[0.622628, 0.054964, 0.020475, 0.145616, 0.163873,
0.321546, 0.282867, 0.743494, 0.750568, 0.732386],
[0.538574, 0.066932, 0.225204, 0.290045, 0.613242,
0.529365, 0.384018, 0.946557, 0.974384, 0.425297],
[0.108817, 0.850094, 0.886417, 0.161581, 0.082973,
0.506354, 0.589650, 0.638991, 0.045151, 0.688464],
[0.917742, 0.365119, 0.484176, 0.173231, 0.210253,
0.303688, 0.992141, 0.023109, 0.977178, 0.535146],
[0.183469, 0.198085, 0.511596, 0.275610, 0.753700,
0.437328, 0.986237, 0.028654, 0.767921, 0.997910],
[0.484908, 0.759122, 0.577318, 0.359934, 0.935730,
0.617833, 0.770173, 0.311175, 0.004831, 0.157457],
[0.634077, 0.236972, 0.016427, 0.261753, 0.349712,
0.245870, 0.412238, 0.523557, 0.985327, 0.094060],
[0.477875, 0.803438, 0.496728, 0.848920, 0.497386,
0.938203, 0.279797, 0.287076, 0.395184, 0.980546],
[0.450215, 0.193712, 0.975838, 0.103925, 0.077410,
0.709573, 0.253072, 0.311723, 0.885664, 0.204528],
[0.557312, 0.815198, 0.097914, 0.539142, 0.826048,
0.130070, 0.049858, 0.223634, 0.076387, 0.831224],
[0.927559, 0.324916, 0.563393, 0.209281, 0.344394,
0.953384, 0.298679, 0.890637, 0.966615, 0.380006],
[0.026403, 0.997573, 0.479163, 0.379686, 0.687928,
0.832002, 0.214326, 0.348248, 0.073151, 0.062646],
[0.726869, 0.911171, 0.961920, 0.874884, 0.216867,
0.076966, 0.776240, 0.495777, 0.963492, 0.425246],
[0.357483, 0.486330, 0.759177, 0.748362, 0.889904,
0.350438, 0.232983, 0.823613, 0.792656, 0.441264],
[0.875826, 0.359459, 0.214808, 0.425850, 0.493328,
0.456048, 0.523145, 0.504154, 0.090128, 0.472437],
[0.813400, 0.808407, 0.427211, 0.902524, 0.210376,
0.490662, 0.915939, 0.169439, 0.078865, 0.485371],
[0.877334, 0.982207, 0.679085, 0.486335, 0.940715,
0.585964, 0.289279, 0.694886, 0.172625, 0.201457],
[0.141599, 0.476124, 0.762246, 0.067045, 0.411332,
0.813196, 0.134138, 0.302390, 0.856145, 0.349243],
[0.346912, 0.082142, 0.787442, 0.857465, 0.371129,
0.448550, 0.967943, 0.775340, 0.943681, 0.656127],
[0.619267, 0.547196, 0.470422, 0.141566, 0.584198,
0.952226, 0.196462, 0.629549, 0.685469, 0.824365],
[0.014209, 0.789812, 0.836373, 0.186139, 0.493840,
0.710697, 0.910033, 0.368287, 0.865953, 0.140892],
[0.482763, 0.072574, 0.026730, 0.143687, 0.739505,
0.419649, 0.013683, 0.662644, 0.785254, 0.234561],
[0.821421, 0.844100, 0.153937, 0.671762, 0.290469,
0.631347, 0.591435, 0.498966, 0.043395, 0.176771],
[0.404994, 0.496656, 0.951774, 0.497357, 0.715401,
0.023378, 0.493045, 0.342766, 0.117055, 0.698590],
[0.985857, 0.831692, 0.423498, 0.215757, 0.341260,
0.790760, 0.941186, 0.716883, 0.062641, 0.582012],
[0.676905, 0.280897, 0.800638, 0.898913, 0.735995,
0.592412, 0.433021, 0.432772, 0.874477, 0.112375],
[0.377382, 0.118941, 0.529204, 0.419434, 0.673891,
0.074904, 0.129868, 0.819585, 0.220536, 0.353223],
[0.233415, 0.136703, 0.487256, 0.777498, 0.901915,
0.612402, 0.778635, 0.436718, 0.484520, 0.641969],
[0.273297, 0.670196, 0.344525, 0.669751, 0.180230,
0.530085, 0.393284, 0.326043, 0.260840, 0.364690],
[0.931213, 0.676123, 0.912481, 0.898258, 0.001887,
0.408306, 0.917215, 0.496959, 0.287951, 0.562511],
[0.047196, 0.780338, 0.895994, 0.088169, 0.552425,
0.130790, 0.308504, 0.232476, 0.187952, 0.105936],
[0.343517, 0.356222, 0.416018, 0.450278, 0.487765,
0.040510, 0.592363, 0.771635, 0.577849, 0.315843],
[0.527759, 0.529503, 0.210423, 0.756794, 0.892670,
0.339374, 0.445837, 0.363265, 0.432114, 0.942045],
[0.560107, 0.110906, 0.115725, 0.761393, 0.969105,
0.921166, 0.455014, 0.593512, 0.111887, 0.217300],
[0.463382, 0.635591, 0.329484, 0.573602, 0.492558,
0.474174, 0.371906, 0.850465, 0.467637, 0.261373],
[0.033051, 0.422543, 0.294155, 0.699026, 0.846231,
0.047967, 0.686826, 0.480273, 0.463181, 0.345601],
[0.285473, 0.723925, 0.202386, 0.671909, 0.685277,
0.993969, 0.415329, 0.155218, 0.233826, 0.088752],
[0.029705, 0.651519, 0.813239, 0.677718, 0.961189,
0.285385, 0.824635, 0.837670, 0.524970, 0.815489],
[0.519627, 0.508274, 0.141067, 0.156163, 0.274566,
0.536322, 0.834749, 0.852042, 0.656166, 0.964211],
[0.119675, 0.971352, 0.052983, 0.178217, 0.408438,
0.215091, 0.102098, 0.256312, 0.051758, 0.906712]])
f = np.array(
[-1000, -1000, -1000, 90.4, 830.9, 52.7, 375.2, 289.7, 244.1,
470.2, 111.7, 968.9, 903.4, 918.5, 820.3, 441.2, 687.5, 836.9,
11.0, 454.5, 929.3, 952.6, 937.2, 870.5, 211.7, 378.4, 320.3,
729.6, 420.8, 213.8, 717.7, 285.4, 522.8, 748.3, 371.0, 501.2,
568.6, 111.9, 645.2, 486.2, 157.0, 968.5, 137.6, 127.2, 943.4,
437.2, 199.7, 415.4, 966.0, 362.3])
dimension = 12
var_lower = np.array([0, 0] + [0 for i in range(10)])
var_upper = np.array([3, 3] + [1 for i in range(10)])
optimum_point = np.array([0, 2, 0.131461, 0.965235, 0.046134, 0.983011,
0.719813, 0.827542, 0.662422, 0.570546,
0.578707, 0.013264])
optimum_value = -1000
var_type = np.array(['C', 'C'] + ['R'] * 10)
# -- end class
class gear4_cat:
"""
gear4 function of the MINLPLib test set, with categorical variables
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==6)
if (x[5] == 0):
fun = lambda x, y : np.sqrt(x + y)
elif (x[5] == 1):
fun = lambda x, y : x/y
elif (x[5] == 2):
fun = lambda x, y : np.log(x * y * 2)
elif (x[5] == 3):
fun = lambda x, y : x/(y + 10)
elif (x[5] == 4):
            fun = lambda x, y : np.maximum(x, y)
value = -1000000*x[0]*x[1]/(x[2]*x[3]) + 2*x[4] + 144279.32477276
# There is a constraint:
# -1000000*x[0]*x[1]/(x[2]*x[3]) + x[4] + 144279.32477276 >= 0
penalty = 10*max(0,-(-1000000*x[0]*x[1]/(x[2]*x[3]) + x[4] +
144279.32477276))
return(value + penalty)
dimension = 6
var_lower = np.array([12, 12, 12, 12, 0, 0])
var_upper = np.array([60, 60, 60, 60, 100, 4])
optimum_point = np.array([19.0, 16.0, 43.0, 49.0, 1.64342847396619, 1])
optimum_value = 1.6434284739
var_type = np.array(['I'] * 4 + ['R'] + ['C'])
# -- end class
class nvs07_cat:
"""
nvs07 function of the MINLPLib test set, with categorical variables.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==4)
if (x[3] == 2):
fun = lambda x : (x-1)**3
elif (x[3] == 3):
fun = lambda x : np.exp(x/50)
elif (x[3] == 4):
fun = lambda x : x**2
value = 2*fun(x[1]) + x[0] + 5*x[2]
        # There are two constraints (the first uses x**2 in the original
        # problem; here the categorical fun replaces the square):
        # fun(x[2]) * x[1] + 5*x[2] + 3*x[0] - 10 >= 0
        # x[0] - x[2] - 2.66 >= 0
penalty = 0.0
penalty += 10*max(0, -(fun(x[2]) * x[1] + 5*x[2] + 3*x[0] - 10))
penalty += 10*max(0, -(x[0] - x[2] - 2.66))
return(value + penalty)
dimension = 4
var_lower = np.array([0, 0, 0, 2])
var_upper = np.array([200, 200, 200, 4])
optimum_point = np.array([4.0, 0.0, 0.0, 2])
optimum_value = 2.0
var_type = np.array(['I'] * 3 + ['C'])
# -- end class
class nvs09_cat:
"""
nvs09 function of the MINLPLib test set with categorical variables
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==11)
if (x[10] == 0):
fun = np.sqrt
elif (x[10] == 1):
fun = np.abs
elif (x[10] == 2):
fun = lambda x : 1/(x-0.9)
elif (x[10] == 3):
fun = np.log
elif (x[10] == 4):
fun = lambda x : x - 2
value = ((fun(x[0] - 2))**2 + (fun(10 - x[0]))**2 +
(fun(x[1] - 2))**2 + (fun(10 - x[1]))**2 +
(fun(x[2] - 2))**2 + (fun(10 - x[2]))**2 +
(fun(x[3] - 2))**2 + (fun(10 - x[3]))**2 +
(fun(x[4] - 2))**2 + (fun(10 - x[4]))**2 +
(fun(x[5] - 2))**2 + (fun(10 - x[5]))**2 +
(fun(x[6] - 2))**2 + (fun(10 - x[6]))**2 +
(fun(x[7] - 2))**2 + (fun(10 - x[7]))**2 +
(fun(x[8] - 2))**2 + (fun(10 - x[8]))**2 +
(fun(x[9] - 2))**2 + (fun(10 - x[9]))**2 -
(x[0]*x[1]*x[2]*x[3]*x[4]*x[5]*x[6]*x[7]*x[8]*x[9])**0.2)
return(value)
dimension = 11
var_lower = np.array([3 for i in range(10)] + [0])
var_upper = np.array([9 for i in range(10)] + [4])
optimum_point = np.array([8, 8, 8, 8, 7, 8, 7, 8, 8, 8, 2])
optimum_value = -53.179649471788274
var_type = np.array(['I'] * 10 + ['C'])
# -- end class
class st_miqp1_cat:
"""
st_miqp1 function of the MINLPLib test set, with categorical variables.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==6)
value = (50*x[1]*x[1] + 42*x[1] + 50*x[2]*x[2] + 44*x[2] +
50*x[3]*x[3] + 45*x[3] + 50*x[4]*x[4]
+ 47*x[4] + 50*x[5]*x[5] + 47.5*x[5])
        # There is one constraint; the categorical variable x[0] selects the
        # row of cls.c providing its coefficients. For x[0] == 0 it reads:
        # 20*x[1] + 12*x[2] + 11*x[3] + 7*x[4] + 4*x[5] - 40 >= 0
penalty = 100*max(0, -sum(cls.c[int(x[0])][j]*x[j+1]
for j in range(5)) +
cls.c[int(x[0])][-1])
return(value + penalty)
c = np.array([[20, 12, 11, 7, 4, 40],
[8, 23, 12, 10, 10, 35],
[12, 25, 30, 4, 22, 60],
[18, 3, 15, 33, 25, 45],
[27, 11, 9, 20, 13, 32],
[9, 31, 25, 9, 27, 42],
[14, 23, 18, 12, 33, 37]])
dimension = 6
var_lower = np.array([0, 0, 0, 0, 0, 0])
var_upper = np.array([5, 1, 1, 1, 1, 1])
optimum_point = np.array([4, 1.0, 1.0, 0.0, 0.0, 0.0])
optimum_value = 186.0
var_type = np.array(['C'] + ['I'] * 5)
# -- end class
class schaeffer_f7_12_1_int_cat:
"""
Schaeffer F7 function with integer and categorical variables
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==13)
if (x[12] == 0):
fun = lambda x : np.cos(x)
elif (x[12] == 1):
fun = lambda x : np.sin(x)
elif (x[12] == 2):
fun = lambda x : (np.cos(x + np.pi/4))**2
elif (x[12] == 3):
fun = lambda x : (np.sin(x + np.pi/4))**2
value = 0
normalizer = 1.0/float(len(x)-2)
for i in range(len(x)-2):
si = 2**i*np.sqrt((x[i]-cls.optimum_point[i])**2 +
(x[i+1]-cls.optimum_point[i+1])**2)
value += (normalizer * np.sqrt(si) *
(fun(50*si**0.20) + 1))**2
return value - 10
dimension = 13
var_lower = np.array([-50 for i in range(12)] + [0])
var_upper = np.array([50 for i in range(12)] + [3])
optimum_point = np.array([-34.32567, -34.98896, 07.69262, 30.3388,
-48.24371, 23.18355, 24.93374, 32.07436,
46.86153, 4, 25, -16, 1])
optimum_value = -10
var_type = np.array(['R'] * 9 + ['I'] * 3 + ['C'])
# -- end class
class schaeffer_f7_12_2_int_cat:
"""
Schaeffer F7 function with integer and categorical variables.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==13)
if (x[12] == 0):
fun = lambda x : np.cos(x)
elif (x[12] == 1):
fun = lambda x : np.sin(x)
elif (x[12] == 2):
fun = lambda x : (np.cos(x + np.pi/4))**2
elif (x[12] == 3):
fun = lambda x : (np.sin(x + np.pi/4))**2
value = 0
normalizer = 1.0/float(len(x)-2)
for i in range(len(x)-2):
si = 2**i*np.sqrt((x[i]-cls.optimum_point[i])**2 +
(x[i+1]-cls.optimum_point[i+1])**2)
value += (normalizer * np.sqrt(si) *
(fun(50*si**0.20) + 1))**2
return value - 10
dimension = 13
var_lower = np.array([-50 for i in range(12)] + [0])
var_upper = np.array([50 for i in range(12)] + [3])
optimum_point = np.array([-08.214, 30.69133, 48.26095, -04.94219,
15.15357, 00.4841, -13.54025, -40.78766,
-16.02916, 16, 39, -49, 1])
optimum_value = -10
var_type = np.array(['R'] * 9 + ['I'] * 3 + ['C'])
# -- end class
class TestBlackBox(RbfoptBlackBox):
"""A black-box constructed from a known test function.
Parameters
----------
name : string
The name of the function to be implemented.
"""
def __init__(self, name):
"""Constructor.
"""
try:
thismodule = sys.modules[__name__]
self._function = getattr(thismodule, name.lower())
except AttributeError:
raise ValueError('Function ' + name + ' not implemented')
def get_dimension(self):
return self._function.dimension
def get_var_lower(self):
return self._function.var_lower
def get_var_upper(self):
return self._function.var_upper
def get_var_type(self):
return self._function.var_type
def evaluate(self, point):
return self._function.evaluate(point)
def evaluate_noisy(self, point):
raise NotImplementedError('evaluate_noisy() not implemented')
def has_evaluate_noisy(self):
return False
# -- end class
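# Illustrative usage of TestBlackBox (a sketch, not part of the module API):
#     bb = TestBlackBox('gear4_cat')
#     print(bb.get_dimension())                      # 6
#     print(bb.evaluate(gear4_cat.optimum_point))    # ~1.6434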
class TestNoisyBlackBox(RbfoptBlackBox):
"""A noisy black-box constructed from a given black-box function.
Parameters
----------
blackbox : `RbfoptBlackBox`
The black box function to which noise is added.
max_rel_error: float
Maximum relative error.
max_abs_error: float
Maximum absolute error.
"""
def __init__(self, blackbox, max_rel_error = 0.1, max_abs_error = 0.1):
"""Constructor.
"""
assert(max_rel_error >= 0.0)
assert(max_abs_error >= 0.0)
try:
# Get the original function if it is one from this module
self._function = getattr(blackbox, '_function')
except AttributeError:
pass
self._bb = blackbox
self._max_rel_error = max_rel_error
self._max_abs_error = max_abs_error
def get_dimension(self):
return self._bb.get_dimension()
def get_var_lower(self):
return self._bb.get_var_lower()
def get_var_upper(self):
return self._bb.get_var_upper()
def get_var_type(self):
return self._bb.get_var_type()
def evaluate(self, point):
return self._bb.evaluate(point)
def evaluate_noisy(self, point):
value = self._bb.evaluate(point)
rel_noise = np.random.uniform(-self._max_rel_error,
self._max_rel_error)
abs_noise = np.random.uniform(-self._max_abs_error,
self._max_abs_error)
return np.array([value + rel_noise*abs(value) + abs_noise,
- abs(rel_noise*abs(value) + abs_noise),
+ abs(rel_noise*abs(value) + abs_noise)])
def has_evaluate_noisy(self):
return True
# -- end class
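# Illustrative usage of TestNoisyBlackBox: evaluate_noisy returns the noisy
# value together with its (negative, positive) error bounds:
#     noisy_bb = TestNoisyBlackBox(TestBlackBox('gear4_cat'))
#     value, err_lo, err_hi = noisy_bb.evaluate_noisy(gear4_cat.optimum_point)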
class TestEnlargedBlackBox(RbfoptBlackBox):
"""A black-box constructed increasing the size of a test function.
Construct a black box function from a given function, increasing
its dimension by a given factor. The new function is put together
from several independent copies of the original function, plus a
    coupling term. If the dimension multiplier is `d` and the original
function has dimension `n`, the new function has dimension `n*d`
and is computed as:
.. math::
\sum_{j=1}^{d} a_j f(x_{(j-1)n+1},\dots,x_{jn}) + 0.4
f(g_1(x),\dots,g_n(x)),
where `a_j` are random weights that add up to 0.6, and `g_1`
through `g_n` are linear functions of a random subset of the
    variables. These linear functions are appropriately scaled and
clipped so that we do not exceed the original function bounds. The
optimum of the new function stays the same. Finally, all variables
are randomly permuted.
Parameters
----------
name : string
The name of the function to be implemented.
dimension_multiplier : int
Dimension multiplier
"""
def __init__(self, name, dimension_multiplier=1):
"""Constructor.
"""
assert(dimension_multiplier>=1)
try:
thismodule = sys.modules[__name__]
self._function = getattr(thismodule, name.lower())
except AttributeError:
raise ValueError('Function ' + name + ' not implemented')
dim = self._function.dimension
perm = np.random.permutation(dim * dimension_multiplier)
bounds = []
mult_factor = []
shift = []
# For the copy of the function coupling all variables,
# pick dimension_multiplier random variables to add together
coupling = np.reshape(np.random.permutation(dim*dimension_multiplier),
(dim, dimension_multiplier))
for i in range(dim):
# The bounds of the sum are just the sum of the lower
# and upper bounds of the component variables
lb = sum(self._function.var_lower[perm[val] % dim]
for val in coupling[i])
ub = sum(self._function.var_upper[perm[val] % dim]
for val in coupling[i])
bounds.append([lb, ub])
# The coefficients are computed so that the optimum
# stays the same
shifted_opt = sum(self._function.optimum_point[perm[val] % dim]
for val in coupling[i])
# Check the position of the optimum in the interval
ratio = (shifted_opt - lb)/(ub - lb)
orig_ratio = ((self._function.optimum_point[i] -
self._function.var_lower[i]) /
(self._function.var_upper[i] -
self._function.var_lower[i]))
# The multiplication factor should bring the
# transformed optimum to the original optimum
if (ratio != 0.0 and orig_ratio != 0.0):
mult_factor.append(orig_ratio / ratio)
shift.append(0)
elif (orig_ratio == 0.0):
# The true optimum is at the lower bound. We have to
# ensure the transformed point is mapped to it. The
# correct ratio would be zero, but to let the point
# vary, we change the transformed bound instead. The
# "max" in the bound is to prevent errors in case the
# shifted optimum is at the upper bound.
bounds[-1] = [shifted_opt, max(ub, shifted_opt+1)]
mult_factor.append(1.0)
shift.append(0)
else:
# The transformed point is at the lower bound. Ensure
# it can reach the true optimum.
mult_factor.append(1.0)
shift.append(self._function.optimum_point[i] -
self._function.var_lower[i])
# Compute weight of each copy of the function
int_weights = np.random.randint(1, 10, dimension_multiplier)
weight = np.array([0.6*val/sum(int_weights)
for val in int_weights] + [0.4])
# Store data necessary for function evaluation
self.coupling = coupling
        self.extra_bounds = np.array(bounds)
import numpy as np
import pytest
from numpy.testing import assert_allclose
from ..linalg.cholesky import ChoInv, ChoInvPivot, cholesky_factor, fixed_cholesky
from ..linalg.cholesky import jit_cholesky, _check_cholesky_inputs, pivot_cholesky, _pivot_transpose
from ..linalg.linalg_utils import calc_Ainv, calc_A_deriv, calc_mean_params, calc_R, logdet_deriv
from ..Kernel import SquaredExponential
from ..Priors import MeanPriors
from scipy import linalg
@pytest.fixture
def A():
return np.array([[2., 1., 0.2], [1., 2., 0.4], [0.2, 0.4, 2.]])
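# the fixture above is symmetric positive definite, so a Cholesky
# factorization A = L L^T with lower-triangular L is guaranteed to exist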
@pytest.fixture
def b():
return np.array([2., 3., 1.])
def test_ChoInv(A, b):
"test the ChoInv class"
L = linalg.cholesky(A, lower=True)
Ainv = ChoInv(L)
assert_allclose(Ainv.L, L)
x = np.linalg.solve(A, b)
assert_allclose(Ainv.solve(b), x)
assert_allclose(np.log(np.linalg.det(A)), Ainv.logdet())
assert Ainv.solve(np.zeros((3,0))).shape == (3,0)
Ainv = ChoInv(np.zeros((0,0)))
assert Ainv.solve(np.ones(3)).shape == (3,)
Ainv = ChoInv(2.*np.ones((1,1)))
assert_allclose(Ainv.solve(np.ones((1, 3, 1))), 0.25*np.ones((1,3,1)))
def test_ChoInvPivot(A, b):
"test the cho_solve routine using pivoting"
L = np.linalg.cholesky(A)
x = linalg.cho_solve((L, True), b)
L_pivot, P = pivot_cholesky(A)
Ainv = ChoInvPivot(L_pivot, P)
x_pivot = Ainv.solve(b)
assert_allclose(x, x_pivot)
with pytest.raises(AssertionError):
ChoInvPivot(L_pivot, np.array([0, 2, 1, 1], dtype=np.int32)).solve(b)
with pytest.raises(ValueError):
ChoInvPivot(L_pivot, np.array([0, 0, 1], dtype=np.int32)).solve(b)
def test_check_cholesky_inputs():
"Test function that checks inputs to cholesky decomposition routines"
A = np.array([[2., 1.], [1., 2.]])
B = _check_cholesky_inputs(A)
assert_allclose(A, B)
A = np.array([[1., 2.], [1., 2.]])
with pytest.raises(AssertionError):
_check_cholesky_inputs(A)
A = np.array([1., 2.])
with pytest.raises(AssertionError):
_check_cholesky_inputs(A)
A = np.array([[1., 2., 3.], [4., 5., 6.]])
with pytest.raises(AssertionError):
_check_cholesky_inputs(A)
input_matrix = np.array([[-1., 2., 2.], [2., 3., 2.], [2., 2., -3.]])
with pytest.raises(linalg.LinAlgError):
_check_cholesky_inputs(input_matrix)
def test_fixed_cholesky(A):
"Test the cholesky routine with fixed nugget"
L_expected = np.array([[2., 0., 0.], [6., 1., 0.], [-8., 5., 3.]])
input_matrix = np.array([[4., 12., -16.], [12., 37., -43.], [-16., -43., 98.]])
L_actual = fixed_cholesky(input_matrix)
assert_allclose(L_actual, L_expected)
L_actual, nugget = cholesky_factor(input_matrix, 0., "fixed")
assert_allclose(L_actual.L, L_expected)
assert nugget == 0.
L_actual, nugget = cholesky_factor(input_matrix, 0., "fit")
assert_allclose(L_actual.L, L_expected)
assert nugget == 0.
L_expected = np.array([[1.0000004999998751e+00, 0.0000000000000000e+00, 0.0000000000000000e+00],
[9.9999950000037496e-01, 1.4142132088085626e-03, 0.0000000000000000e+00],
[6.7379436301144941e-03, 4.7644444411381860e-06, 9.9997779980004420e-01]])
input_matrix = np.array([[1. + 1.e-6 , 1. , 0.0067379469990855 ],
[1. , 1. + 1.e-6 , 0.0067379469990855 ],
[0.0067379469990855, 0.0067379469990855, 1. + 1.e-6 ]])
L_actual = fixed_cholesky(input_matrix)
assert_allclose(L_expected, L_actual)
def test_jit_cholesky():
"Tests the stabilized Cholesky decomposition routine"
L_expected = np.array([[2., 0., 0.], [6., 1., 0.], [-8., 5., 3.]])
input_matrix = np.array([[4., 12., -16.], [12., 37., -43.], [-16., -43., 98.]])
L_actual, jitter = jit_cholesky(input_matrix)
assert_allclose(L_expected, L_actual)
assert_allclose(jitter, 0.)
L_expected = np.array([[1.0000004999998751e+00, 0.0000000000000000e+00, 0.0000000000000000e+00],
[9.9999950000037496e-01, 1.4142132088085626e-03, 0.0000000000000000e+00],
[6.7379436301144941e-03, 4.7644444411381860e-06, 9.9997779980004420e-01]])
input_matrix = np.array([[1. , 1. , 0.0067379469990855],
[1. , 1. , 0.0067379469990855],
[0.0067379469990855, 0.0067379469990855, 1. ]])
L_actual, jitter = jit_cholesky(input_matrix)
assert_allclose(L_expected, L_actual)
assert_allclose(jitter, 1.e-6)
L_actual, jitter = cholesky_factor(input_matrix, 0., "adaptive")
assert_allclose(L_expected, L_actual.L)
assert_allclose(jitter, 1.e-6)
input_matrix = np.array([[1.e-6, 1., 0.], [1., 1., 1.], [0., 1., 1.e-10]])
with pytest.raises(linalg.LinAlgError):
jit_cholesky(input_matrix)
def test_pivot_cholesky():
"Tests pivoted cholesky decomposition routine"
input_matrix = np.array([[4., 12., -16.], [12., 37., -43.], [-16., -43., 98.]])
input_matrix_copy = np.copy(input_matrix)
L_expected = np.array([[ 9.899494936611665 , 0. , 0. ],
[-4.3436559415745055, 4.258245303082538 , 0. ],
[-1.616244071283537 , 1.1693999481734827, 0.1423336335961131]])
Piv_expected = np.array([2, 1, 0], dtype = np.int32)
L_actual, Piv_actual = pivot_cholesky(input_matrix)
assert_allclose(L_actual, L_expected)
assert np.array_equal(Piv_expected, Piv_actual)
assert_allclose(input_matrix, input_matrix_copy)
input_matrix = np.array([[1., 1., 1.e-6], [1., 1., 1.e-6], [1.e-6, 1.e-6, 1.]])
input_matrix_copy = np.copy(input_matrix)
L_expected = np.array([[1.0000000000000000e+00, 0.0000000000000000e+00, 0.0000000000000000e+00],
[9.9999999999999995e-07, 9.9999999999949996e-01, 0.0000000000000000e+00],
[1.0000000000000000e+00, 0.0000000000000000e+00, 3.3333333333316667e-01]])
Piv_expected = np.array([0, 2, 1], dtype=np.int32)
L_actual, Piv_actual = pivot_cholesky(input_matrix)
assert_allclose(L_actual, L_expected)
assert np.array_equal(Piv_expected, Piv_actual)
assert_allclose(input_matrix, input_matrix_copy)
L_actual, nugget = cholesky_factor(input_matrix, np.array([]), "pivot")
assert_allclose(L_actual.L, L_expected)
assert np.array_equal(Piv_expected, L_actual.P)
assert len(nugget) == 0
def test_pivot_transpose():
"Test function to invert pivot matrix"
P = np.array([0, 2, 1], dtype = np.int32)
Piv = _pivot_transpose(P)
    assert np.array_equal(Piv, P)
P = np.array([1, 2, 1], dtype = np.int32)
with pytest.raises(ValueError):
_pivot_transpose(P)
P = np.array([[0, 1, 2], [2, 1, 0]], dtype=np.int32)
with pytest.raises(AssertionError):
_pivot_transpose(P)
@pytest.fixture
def dm():
return np.array([[1., 1.], [1., 2.], [1., 4.]])
@pytest.fixture
def Kinv(A):
return ChoInv(np.linalg.cholesky(A))
def test_calc_Ainv(A, dm, Kinv):
"test the function to compute inverse of A"
# Zero mean, weak mean covariance
dm_z = np.zeros((3, 0))
B = MeanPriors()
result = calc_Ainv(Kinv, dm_z, B)
assert result.L.shape == (0,0)
# nonzero mean, weak mean covariance
result = calc_Ainv(Kinv, dm, B)
result_expected = np.linalg.cholesky(np.dot(dm.T, np.dot(np.linalg.inv(A), dm)))
assert_allclose(result.L, result_expected)
# nonzero mean, mean covariance
B = MeanPriors(mean=[2., 1.], cov=np.eye(2))
result = calc_Ainv(Kinv, dm, B)
result_expected = np.linalg.cholesky(np.dot(dm.T, np.dot(np.linalg.inv(A), dm)) + np.eye(2))
assert_allclose(result.L, result_expected)
with pytest.raises(AssertionError):
calc_Ainv(1., dm, B)
with pytest.raises(AssertionError):
calc_Ainv(Kinv, dm, 1.)
def test_calc_A_deriv(dm):
"test calculating the derivative of Ainv"
x = np.array([[1.], [2.], [4.]])
K = SquaredExponential().kernel_f(x, x, [0.])
dKdtheta = SquaredExponential().kernel_deriv(x, x, [0.])
Kinv = ChoInv(np.linalg.cholesky(K))
A = np.dot(dm.T, Kinv.solve(dm))
deriv_expect = calc_A_deriv(Kinv, dm, dKdtheta)
dx = 1.e-6
K2 = SquaredExponential().kernel_f(x, x, [-dx])
Kinv_2 = ChoInv(np.linalg.cholesky(K2))
A2 = np.dot(dm.T, Kinv_2.solve(dm))
deriv_fd = np.zeros((1, 2, 2))
deriv_fd[0] = (A - A2)/dx
assert_allclose(deriv_expect, deriv_fd, atol=1.e-6, rtol=1.e-6)
# zero mean
dm_z = np.zeros((3, 0))
A = np.dot(dm_z.T, Kinv.solve(dm_z))
deriv_expect = calc_A_deriv(Kinv, dm_z, dKdtheta)
dx = 1.e-6
K2 = SquaredExponential().kernel_f(x, x, [-dx])
Kinv_2 = ChoInv(np.linalg.cholesky(K2))
A2 = np.dot(dm_z.T, Kinv_2.solve(dm_z))
deriv_fd = np.zeros((1, 0, 0))
deriv_fd[0] = (A - A2)/dx
assert_allclose(deriv_expect, deriv_fd, atol=1.e-6, rtol=1.e-6)
with pytest.raises(AssertionError):
calc_A_deriv(1., dm, dKdtheta)
with pytest.raises(AssertionError):
calc_A_deriv(Kinv, dm, np.ones(3))
with pytest.raises(AssertionError):
calc_A_deriv(Kinv, dm, np.ones((1, 2, 3)))
with pytest.raises(AssertionError):
calc_A_deriv(Kinv, dm, np.ones((1, 2, 2)))
def test_calc_mean_params(A, dm, Kinv):
"test the calc_mean_params function"
# weak mean priors
Kinv_t = Kinv.solve(np.array([1., 2., 4.]))
B = MeanPriors()
Ainv = calc_Ainv(Kinv, dm, B)
beta_actual = calc_mean_params(Ainv, Kinv_t, dm, B)
beta_expected = Ainv.solve(np.dot(dm.T, Kinv_t))
assert_allclose(beta_actual, beta_expected)
# mean priors
B = MeanPriors(mean=[2., 1.], cov=np.eye(2))
beta_actual = calc_mean_params(Ainv, Kinv_t, dm, B)
beta_expected = Ainv.solve(np.dot(dm.T, Kinv_t) + np.array([2., 1.]))
assert_allclose(beta_actual, beta_expected)
with pytest.raises(AssertionError):
calc_mean_params(1., Kinv_t, dm, B)
with pytest.raises(AssertionError):
calc_mean_params(Ainv, Kinv_t, dm, 1.)
def test_calc_R(A, dm, Kinv):
"test the calc_R function"
dmtest = np.array([[1., 3.], [1., 5.]])
    Ktest = np.array([[0.2, 0.6, 0.8], [0.1, 0.2, 1.]])
"""
Functions are useful utilities for SITperturb experiments
Notes
-----
Author : <NAME>
Date : 13 August 2017
Usage
-----
[1] calcDecJan(varx,vary,lat,lon,level,levsq)
[2] calcDecJanFeb(varx,vary,lat,lon,level,levsq)
[3] calc_FDR_ttest(varx,vary,alpha_f)
[4] calc_indttest(varx,vary)
[5] calc_weightedAve(var,lats)
[6] calc_spatialCorr(varx,vary,lats,lons,weight)
[7] calc_RMSE(varx,vary,lats,lons,weight)
[8] calc_spatialCorrHeight(varx,vary,lats,lons,weight)
[9] calc_spatialCorrHeightLev(varx,vary,lats,lons,weight,levelq)
[10] detrendData(datavar,years,level,yearmn,yearmx)
[11] detrendDataR(datavar,years,level,yearmn,yearmx)
[12] mk_test(x, alpha)
"""
def calcDecJan(varx,vary,lat,lon,level,levsq):
"""
Function calculates average for December-January
Parameters
----------
varx : 4d array or 5d array
[year,month,lat,lon] or [year,month,lev,lat,lon]
vary : 4d array or 5d array
[year,month,lat,lon] or [year,month,lev,lat,lon]
lat : 1d numpy array
latitudes
lon : 1d numpy array
longitudes
level : string
Height of variable (surface or profile)
levsq : integer
number of levels
Returns
-------
varx_dj : 3d array or 4d array
[year,lat,lon] or [year,lev,lat,lon]
vary_dj : 3d array
[year,lat,lon] or [year,lev,lat,lon]
Usage
-----
varx_dj,vary_dj = calcDecJan(varx,vary,lat,lon,level,levsq)
"""
print('\n>>> Using calcDecJan function!')
### Import modules
import numpy as np
### Reshape for 3d variables
if level == 'surface':
varxravel = np.reshape(varx.copy(),
(int(varx.shape[0]*12),
int(lat.shape[0]),int(lon.shape[0])))
varyravel = np.reshape(vary.copy(),
(int(vary.shape[0]*12),
int(lat.shape[0]),int(lon.shape[0])))
varx_dj = np.empty((varx.shape[0]-1,lat.shape[0],lon.shape[0]))
vary_dj = np.empty((vary.shape[0]-1,lat.shape[0],lon.shape[0]) )
for i in range(0,varxravel.shape[0]-12,12):
counter = 0
if i >= 12:
counter = i//12
djappendh = np.append(varxravel[11+i,:,:],varxravel[12+i,:,:])
djappendf = np.append(varyravel[11+i,:,:],varyravel[12+i,:,:])
varx_dj[counter,:,:] = np.nanmean(np.reshape(djappendh,
(2,int(lat.shape[0]),int(lon.shape[0]))),
axis=0)
vary_dj[counter,:,:] = np.nanmean(np.reshape(djappendf,
(2,int(lat.shape[0]),int(lon.shape[0]))),
axis=0)
### Reshape for 4d variables
elif level == 'profile':
varxravel = np.reshape(varx.copy(),
(int(varx.shape[0]*12.),levsq,
int(lat.shape[0]),int(lon.shape[0])))
varyravel = np.reshape(vary.copy(),
(int(vary.shape[0]*12.),levsq,
int(lat.shape[0]),int(lon.shape[0])))
varx_dj = np.empty((int(varx.shape[0]-1),levsq,
int(lat.shape[0]),int(lon.shape[0])))
vary_dj = np.empty((int(vary.shape[0]-1),levsq,
int(lat.shape[0]),int(lon.shape[0])) )
for i in range(0,varxravel.shape[0]-12,12):
counter = 0
if i >= 12:
counter = i//12
djappendh = np.append(varxravel[11+i,:,:,:],
varxravel[12+i,:,:,:])
djappendf = np.append(varyravel[11+i,:,:,:],
varyravel[12+i,:,:,:])
varx_dj[counter,:,:] = np.nanmean(np.reshape(djappendh,
(2,levsq,int(lat.shape[0]),
int(lon.shape[0]))),axis=0)
vary_dj[counter,:,:] = np.nanmean(np.reshape(djappendf,
(2,levsq,int(lat.shape[0]),
int(lon.shape[0]))),axis=0)
    else:
        raise ValueError('Selected wrong height - (surface or profile)!')
    print('Completed: Organized data by months (DJ)!')
print('*Completed: Finished calcDecJan function!')
return varx_dj,vary_dj
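# Example (illustrative only, with made-up dimensions):
#     lat = np.linspace(-90, 90, 24); lon = np.linspace(0, 360, 48)
#     varx = vary = np.random.rand(10, 12, 24, 48)  # [year, month, lat, lon]
#     vx_dj, vy_dj = calcDecJan(varx, vary, lat, lon, 'surface', 1)  # -> (9, 24, 48)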
###############################################################################
###############################################################################
###############################################################################
def calcDecJanFeb(varx,vary,lat,lon,level,levsq):
"""
Function calculates average for December-January-February
Parameters
----------
varx : 4d array or 5d array
[year,month,lat,lon] or [year,month,lev,lat,lon]
vary : 4d array or 5d array
[year,month,lat,lon] or [year,month,lev,lat,lon]
lat : 1d numpy array
latitudes
lon : 1d numpy array
longitudes
level : string
Height of variable (surface or profile)
levsq : integer
number of levels
Returns
-------
    varx_djf : 3d array or 4d array
        [year,lat,lon] or [year,lev,lat,lon]
Usage
-----
varx_djf = calcDecJanFeb(varx,vary,lat,lon,level,levsq)
"""
print('\n>>> Using calcDecJanFeb function!')
### Import modules
import numpy as np
### Reshape for 3d variables
if level == 'surface':
varxravel = np.reshape(varx.copy(),
(int(varx.shape[0]*12),
int(lat.shape[0]),int(lon.shape[0])))
varyravel = np.reshape(vary.copy(),
(int(vary.shape[0]*12),
int(lat.shape[0]),int(lon.shape[0])))
varx_djf = np.empty((varx.shape[0]-1,lat.shape[0],lon.shape[0]))
vary_djf = np.empty((vary.shape[0]-1,lat.shape[0],lon.shape[0]) )
for i in range(0,varxravel.shape[0]-12,12):
counter = 0
if i >= 12:
counter = i//12
djfappendh1 = np.append(varxravel[11+i,:,:],varxravel[12+i,:,:])
djfappendf1 = np.append(varyravel[11+i,:,:],varyravel[12+i,:,:])
djfappendh = np.append(djfappendh1,varxravel[13+i,:,:])
djfappendf = np.append(djfappendf1,varyravel[13+i,:,:])
varx_djf[counter,:,:] = np.nanmean(np.reshape(djfappendh,
(3,int(lat.shape[0]),int(lon.shape[0]))),
axis=0)
vary_djf[counter,:,:] = np.nanmean(np.reshape(djfappendf,
(3,int(lat.shape[0]),int(lon.shape[0]))),
axis=0)
### Reshape for 4d variables
elif level == 'profile':
varxravel = np.reshape(varx.copy(),
(int(varx.shape[0]*12.),levsq,
int(lat.shape[0]),int(lon.shape[0])))
varyravel = np.reshape(vary.copy(),
(int(vary.shape[0]*12.),levsq,
int(lat.shape[0]),int(lon.shape[0])))
varx_djf = np.empty((int(varx.shape[0]-1),levsq,
int(lat.shape[0]),int(lon.shape[0])))
vary_djf = np.empty((int(vary.shape[0]-1),levsq,
int(lat.shape[0]),int(lon.shape[0])) )
for i in range(0,varxravel.shape[0]-12,12):
counter = 0
if i >= 12:
counter = i//12
djfappendh1 = np.append(varxravel[11+i,:,:,:],
varxravel[12+i,:,:,:])
djfappendf1 = np.append(varyravel[11+i,:,:,:],
varyravel[12+i,:,:,:])
djfappendh = np.append(djfappendh1,
varxravel[13+i,:,:,:])
djfappendf = np.append(djfappendf1,
varyravel[13+i,:,:,:])
varx_djf[counter,:,:] = np.nanmean(np.reshape(djfappendh,
(3,levsq,int(lat.shape[0]),
int(lon.shape[0]))),axis=0)
vary_djf[counter,:,:] = np.nanmean(np.reshape(djfappendf,
(3,levsq,int(lat.shape[0]),
int(lon.shape[0]))),axis=0)
    else:
        raise ValueError('Selected wrong height - (surface or profile)!')
print('Completed: Organized data by months (DJF)!')
print('*Completed: Finished calcDecJanFeb function!')
return varx_djf
###############################################################################
###############################################################################
###############################################################################
def calc_FDR_ttest(varx,vary,alpha_f):
"""
Function first calculates statistical difference for 2 independent
sample t-test and then adjusts using a false discovery rate (FDR)
where alpha_o = alpha_FDR
Parameters
----------
varx : 2d or 3d array
vary : 2d or 3d array
alpha_f : float (alpha_o = alpha_FDR)
Returns
-------
pruns : 1d or 2d array of adjusted p values
Usage
-----
calc_FDR_ttest(varx,vary,alpha_f)
"""
print('\n>>> Using calc_FDR_ttest function!')
### Import modules
import numpy as np
import scipy.stats as sts
import statsmodels.stats.multitest as fdr
### 2-independent sample t-test
stat,pvalue = sts.ttest_ind(varx,vary,nan_policy='omit')
### Ravel all 2d pvalues
if pvalue.ndim == 2:
pvalall = np.reshape(pvalue,(pvalue.shape[0]* pvalue.shape[1]))
else:
pvalall = pvalue
### Calculate false discovery rate
prunsq = np.empty((pvalall.shape))
score = np.empty((pvalall.shape))
prunsq.fill(np.nan)
score.fill(np.nan)
### Check for nans before correction!!
mask = np.isfinite(pvalall[:])
score[mask],prunsq[mask] = fdr.fdrcorrection(pvalall[mask],alpha=alpha_f,
method='indep')
### Reshape into correct dimensions
pruns = np.reshape(prunsq,(pvalue.shape))
### Mask variables by their adjusted p-values
    pruns[np.where(pruns >= alpha_f)] = np.nan
    print('*Completed: Finished calc_FDR_ttest function!')
    return pruns
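# Example (illustrative only, with made-up arrays of shape [samples, lat, lon]):
#     varx = np.random.randn(30, 10, 20)
#     vary = np.random.randn(30, 10, 20) + 0.5
#     pruns = calc_FDR_ttest(varx, vary, alpha_f=0.05)  # nan where not significant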
"""
Author: <NAME>
Date: 01/30/2019
"""
import json
import websocket
import traceback
import helper
import ssl
import time as time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
from mpl_toolkits.mplot3d import Axes3D
class vol_surface(object):
"""Derbit volatiolity analytics tool for decision making"""
def __init__(self, url='', on_message=None, traceback=2, save_local=False, plot_type=None):
"""
Program constructor
:param url: Requested websocket address
        :param on_message: optional callback invoked for each websocket message
        :param traceback: number of hours to look back from
        :param save_local: True if data is stored locally
        :param plot_type: Plot type (currently supports 2D scatter, 3D scatter, and 3D surface plots)
"""
self.url = url
self.traceback = traceback
self.save_local = save_local
self.plot_type = plot_type
try:
self.vol_data = pd.read_csv("volatility.csv")
except FileNotFoundError:
self.vol_data = pd.DataFrame()
self.ws = None
self.active = False
        if on_message is not None:
            # replace the default handler only when a callback is provided;
            # otherwise the on_message method below is used
            self.on_message = on_message
self.action = "/api/v1/public/getlasttrades"
    def on_message(self, message):
        """
        Default websocket response handler; prints the message. A callable
        passed to the constructor overrides this method.
        :param message: response message in dict format.
        """
        print(message)
def start(self):
"""
Start websocket
"""
self.ws = websocket.create_connection(self.url, sslopt={'cert_reqs': ssl.CERT_NONE})
self.active = True
self.on_connect()
self.run()
def on_connect(self):
"""
Call when websocket is connected.
"""
print('connected')
def reconnect(self):
"""
Reconnect to websocket server.
"""
self.ws = websocket.create_connection(self.url, sslopt={'cert_reqs': ssl.CERT_NONE})
self.on_connect()
def on_error(self, err):
"""
Print message when error occur
"""
print(err)
def send_req(self, req):
"""
Send request to websocket server
"""
self.ws.send(json.dumps(req))
print(req)
@staticmethod
def concurrent_data_handler(message):
"""
using pandas to transform the message into format we intended
:param message: message received from websocket
:return: revised data-stream
"""
temp_df = pd.DataFrame(message['result'])
temp_df = temp_df[['instrument', 'direction', 'indexPrice', 'price', 'quantity', 'iv', 'timeStamp', 'tradeId']]
        # convert the exchange timestamp from milliseconds to seconds
        temp_df['timeStamp'] = temp_df['timeStamp'] / 1000
        # instrument names look like BTC-29MAR19-4000-C: split out the
        # call/put flag, the strike, and the expiry date
        temp_df['C-P'] = temp_df['instrument'].str.split('-', expand=True)[3]
        temp_df['strike'] = temp_df['instrument'].str.split('-', expand=True)[2].astype(float)
        temp_df['end_ts'] = pd.DataFrame(
            pd.to_datetime(temp_df['instrument'].str.split('-', expand=True)[1]).values.astype(np.int64) / 1000000000)
        # time to expiry as a fraction of a (365-day) year
        temp_df['expiration_t'] = (temp_df['end_ts'] - temp_df['timeStamp']) / (365 * 24 * 3600)
        # option premium is quoted in the underlying; convert to currency via the index price
        temp_df['option_price'] = temp_df['price'] * temp_df['indexPrice']
return temp_df
@staticmethod
def vis_tool(df, exp_ts, plot_type="scatter_3D"):
"""
Help to visualize the volatility skew/smile of past trades
        :param df: A DataFrame produced by concurrent_data_handler
        :param exp_ts: expiration time
        :param plot_type: Plot type (currently supports 2D scatter, 3D scatter, and 3D surface plots)
:return: A PyPlot object
"""
x = df['strike']
y = df['expiration_t']
z = df['iv']
        area = df['quantity'] * 3  # per-trade marker sizes, scaled up for visibility
        def make_surf(x, y, z):
            x_grids, y_grids = np.meshgrid(np.linspace(min(x), max(x), 100), np.linspace(min(y), max(y), 100))
            # interpolate the scattered IV points onto the regular grid
            # (the call is reconstructed from a truncated source; the 'linear'
            # method is an assumption)
            z_grids = griddata(np.array([x, y]).T, np.array(z), (x_grids, y_grids), method='linear')
            return x_grids, y_grids, z_grids
# -*- coding: utf-8 -*-
"""Copia di Classification.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/19w462hZ5-StoAmR7fA-GquAcs-hibbSU
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
from matplotlib.ticker import AutoMinorLocator
from matplotlib import gridspec
#scaling, normalization
from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler
from sklearn import metrics
from google.colab import files
# load the dataset
df = pd.read_csv('words_glasgow.csv')
# make a copy of the dataset in case the data gets manipulated
dfcopy= df.copy()
df2 = df.copy()
df2["perceivability"] = df2[["imageability", "concreteness"]].mean(axis=1)
df_perc=df2.drop(["concreteness","imageability"], axis=1)
dfprepro= df_perc.copy()
dfprepro=dfprepro.rename(columns={"gender": "masculinity"})
dfprepro.loc[(dfprepro['web_corpus_freq'].isnull() == True), 'web_corpus_freq'] = dfprepro['web_corpus_freq'].mean()
dfprepro["web_corpus_log"] = pd.qcut(dfprepro["web_corpus_freq"], 10) #taglio la variabile web_corpus_freq in 10 gruppi
dataframe = [dfprepro]
for dataset in dataframe:
dataset.loc[(dataset["web_corpus_freq"] > 10000) & (dataset["web_corpus_freq"] <= 100000), "web_corpus_freq"] = 4
dataset.loc[(dataset["web_corpus_freq"] > 100000) & (dataset["web_corpus_freq"] <= 1000000), "web_corpus_freq"] = 5
dataset.loc[(dataset["web_corpus_freq"] > 1000000) & (dataset["web_corpus_freq"] <= 10000000), "web_corpus_freq"] = 6
dataset.loc[(dataset["web_corpus_freq"] > 10000000) & (dataset["web_corpus_freq"] <= 100000000), "web_corpus_freq"] = 7
dataset.loc[(dataset["web_corpus_freq"] > 100000000) & (dataset["web_corpus_freq"] <= 1000000000), "web_corpus_freq"] = 8
dataset.loc[dataset["web_corpus_freq"] > 1000000000, "web_corpus_freq"] = 9
dfprepro = dfprepro.drop(["web_corpus_log","word"], axis=1)
"""# Preprocess for classification"""
# for the decision tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
# to visualize it
from sklearn import tree
import pydotplus
from IPython.display import Image
# evaluation
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, f1_score, classification_report
from sklearn.metrics import roc_curve, auc, roc_auc_score
# hyperparameter tuning
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
# cross-validation
from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import plot_confusion_matrix
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
df_class_ref = dfprepro.copy()
var_to_scale=['aoa',"arousal","valence","dominance","familiarity","semsize","masculinity","perceivability"]
features = df_class_ref[var_to_scale]
scaler = MinMaxScaler().fit(features.values)
features = scaler.transform(features.values)
df_class_ref[var_to_scale] = features
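# all rating variables are now rescaled to [0, 1], so the binarization
# thresholds ("taglio") used below live on this normalized scale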
"""#Decision Tree (mostly) and comparison with other methods (binary varaibles only)
### Arousal
"""
refvar="arousal"
taglio=0.55
X=df_class_ref.drop(refvar,axis=1).copy()
y=df_class_ref[refvar].copy()
y_up_index = y >= taglio
y[y_up_index]=1
y_zero_index = y < taglio
y[y_zero_index]=0
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
clf_dt = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42)
clf_dt = clf_dt.fit(X_train, y_train)
plt.figure(figsize=(15,7.5))
from sklearn.tree import plot_tree
plot_tree(clf_dt,
filled=True,
rounded=True,
class_names=["not aroused","aroused"],
feature_names=X.columns)
from sklearn.metrics import confusion_matrix
from sklearn.metrics import plot_confusion_matrix
from sklearn.model_selection import cross_val_score
plot_confusion_matrix(clf_dt, X_test, y_test, display_labels=["not aroused","aroused"])
y_pred = clf_dt.predict(X_test)
print('Accuracy %s' % accuracy_score(y_test, y_pred))
print('F1-score %s' % f1_score(y_test, y_pred, average=None))
print(classification_report(y_test, y_pred))
y_score = clf_dt.predict_proba(X_test)
fpr, tpr, th = roc_curve(y_test, y_score[:,1])
roc_auc = auc(fpr, tpr)
print(roc_auc)
plt.figure(figsize=(8,5))
plt.plot(fpr, tpr, label='$AUC$ = %.3f' % (roc_auc))
plt.legend(loc="lower right", fontsize=14, frameon=False)
plt.plot([0,1], [0,1], 'k--')
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=22)
plt.show()
path = clf_dt.cost_complexity_pruning_path(X_train, y_train)
ccp_alphas = path.ccp_alphas
ccp_alphas = ccp_alphas[:-1]
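# drop the largest alpha: it prunes the tree all the way down to the root node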
clf_dts=[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(random_state=0, ccp_alpha=ccp_alpha)
clf_dt.fit(X_train, y_train)
clf_dts.append(clf_dt)
train_scores = [clf_dt.score(X_train, y_train) for clf_dt in clf_dts]
test_scores = [clf_dt.score(X_test, y_test) for clf_dt in clf_dts]
fig, ax =plt.subplots()
ax.set_xlabel("alpha")
ax.set_ylabel("accuracy")
ax.set_title("Accuracy vs alpha for training and testing sets")
ax.plot(ccp_alphas,train_scores, marker ='o',label='train',drawstyle='steps-post')
ax.plot(ccp_alphas,test_scores, marker ='o',label='test',drawstyle='steps-post')
ax.legend()
plt.show()
clf_dt = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42, ccp_alpha=0.003)
scores= cross_val_score(clf_dt,X_train,y_train, cv=10)
df=pd.DataFrame(data={'tree':range(10), 'accuracy':scores})
df.plot(x='tree', y='accuracy',marker='o',linestyle='--')
alpha_loop_values =[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(random_state=0, ccp_alpha=ccp_alpha)
scores= cross_val_score(clf_dt,X_train,y_train, cv=10)
alpha_loop_values.append([ccp_alpha,np.mean(scores), np.std(scores)])
alpha_results = pd.DataFrame(alpha_loop_values,
columns=['alpha','mean_accuracy','std'])
alpha_results.plot(x='alpha',
y='mean_accuracy',
marker='o',
linestyle='--')
alpha_results[(alpha_results['alpha']>0.001)
&
(alpha_results['alpha']<0.005)]
indexmax = alpha_results[['mean_accuracy']].idxmax()
maxalpha=alpha_results.loc[indexmax,'alpha']
ideal_ccp_alpha = float(maxalpha)
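# the ideal alpha is the one that maximizes mean cross-validated accuracy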
clf_dt_pruned = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42, ccp_alpha=ideal_ccp_alpha)
clf_dt_pruned = clf_dt_pruned.fit(X_train, y_train)
plot_confusion_matrix(clf_dt_pruned,
X_test,
y_test,
display_labels=['not aroused','aroused'])
plt.figure(figsize=(15,7.5))
from sklearn.tree import plot_tree
plot_tree(clf_dt_pruned,
filled=True,
rounded=True,
class_names=["not aroused","aroused"],
feature_names=X.columns)
y_pred = clf_dt_pruned.predict(X_test)
print('Accuracy %s' % accuracy_score(y_test, y_pred))
print('F1-score %s' % f1_score(y_test, y_pred,average='weighted'))
print(classification_report(y_test, y_pred))
y_score = clf_dt_pruned.predict_proba(X_test)
fpr_0, tpr_0, th_0 = roc_curve(y_test, y_score[:,1])
roc_auc_0 = auc(fpr_0, tpr_0)
#Entropy
clf_dt = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42)
clf_dt = clf_dt.fit(X_train, y_train)
path = clf_dt.cost_complexity_pruning_path(X_train, y_train)
ccp_alphas = path.ccp_alphas
ccp_alphas = ccp_alphas[:-1]
clf_dts=[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='entropy',random_state=0, ccp_alpha=ccp_alpha)
clf_dt.fit(X_train, y_train)
clf_dts.append(clf_dt)
train_scores = [clf_dt.score(X_train, y_train) for clf_dt in clf_dts]
test_scores = [clf_dt.score(X_test, y_test) for clf_dt in clf_dts]
alpha_loop_values =[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=0, ccp_alpha=ccp_alpha)
scores= cross_val_score(clf_dt,X_train,y_train, cv=10)
alpha_loop_values.append([ccp_alpha,np.mean(scores), np.std(scores)])
alpha_results = pd.DataFrame(alpha_loop_values,
columns=['alpha','mean_accuracy','std'])
indexmax = alpha_results[['mean_accuracy']].idxmax()
maxalpha=alpha_results.loc[indexmax,'alpha']
ideal_ccp_alpha = float(maxalpha)
print(ideal_ccp_alpha)
clf_dt_pruned = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42, ccp_alpha=ideal_ccp_alpha)
clf_dt_pruned = clf_dt_pruned.fit(X_train, y_train)
y_score = clf_dt_pruned.predict_proba(X_test)
fpr_en, tpr_en, th_en = roc_curve(y_test, y_score[:,1])
roc_auc_en = auc(fpr_en, tpr_en)
y_pred = clf_dt_pruned.predict(X_test)
print('Accuracy %s' % accuracy_score(y_test, y_pred))
print('F1-score %s' % f1_score(y_test, y_pred,average='weighted'))
print(classification_report(y_test, y_pred))
#KNN, find best score
acc = []
# Will take some time
for i in range(1,40):
neigh = KNeighborsClassifier(n_neighbors = i).fit(X_train,y_train)
yhat = neigh.predict(X_test)
acc.append(metrics.accuracy_score(y_test, yhat))
# acc[i-1] holds the accuracy for k=i, so the best k is the argmax shifted by one
clf_knn = KNeighborsClassifier(n_neighbors=acc.index(max(acc)) + 1)
clf_knn.fit(X_train, y_train)  # fit on the training split only to avoid leakage
y_score = clf_knn.predict_proba(X_test)
fpr_KNN, tpr_KNN, th_KNN = roc_curve(y_test, y_score[:,1])
roc_auc_KNN = auc(fpr_KNN, tpr_KNN)
y_pred = clf_knn.predict(X_test)
print('Accuracy %s' % accuracy_score(y_test, y_pred))
print('F1-score %s' % f1_score(y_test, y_pred,average='weighted'))
print(classification_report(y_test, y_pred))
# Instantiate model with 380 decision trees
model = RandomForestClassifier(n_estimators = 380, random_state = 42)
# Train the model on training data
ra=model.fit(X_train, y_train)
y_score = model.predict_proba(X_test)
fpr_RF, tpr_RF, th_RF = roc_curve(y_test, y_score[:,1])
roc_auc_RF = auc(fpr_RF, tpr_RF)
y_pred = model.predict(X_test)
print('Accuracy %s' % accuracy_score(y_test, y_pred))
print('F1-score %s' % f1_score(y_test, y_pred,average='weighted'))
print(classification_report(y_test, y_pred))
#Grid Search
clf_dt = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42)
clf_dt = clf_dt.fit(X_train, y_train)
param_list = {'max_depth': [None] + [2, 3, 4,5,6,7],
'min_samples_split': [2, 5, 10, 15, 20,30,50,60,70,80,90,100],
'min_samples_leaf': [1, 5, 10, 20,25,30,40,50]
}
grid_search = GridSearchCV(clf_dt, param_grid=param_list, scoring='f1')
grid_search.fit(X, y)
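# GridSearchCV tries every combination in param_list and keeps the one with
# the best cross-validated F1 score (note that here the search runs on the
# full dataset rather than only the training split)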
res = grid_search.cv_results_
def report(results, n_top=3):
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
print("Model with rank: {0}".format(i))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
results['mean_test_score'][candidate],
results['std_test_score'][candidate]))
print("Parameters: {0}".format(results['params'][candidate]))
print("")
report(res, n_top=3)
clf_dt_pruned = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=40,random_state=42)
clf_dt_pruned = clf_dt_pruned.fit(X_train, y_train)
y_score = clf_dt_pruned.predict_proba(X_test)
fpr_c, tpr_c, th_c = roc_curve(y_test, y_score[:,1])
roc_auc_c = auc(fpr_c, tpr_c)
y_pred = clf_dt_pruned.predict(X_test)
print('Accuracy %s' % accuracy_score(y_test, y_pred))
print('F1-score %s' % f1_score(y_test, y_pred,average='weighted'))
print(classification_report(y_test, y_pred))
plt.figure(figsize=(15,7.5))
from sklearn.tree import plot_tree
plot_tree(clf_dt_pruned,
filled=True,
rounded=True,
class_names=["not aroused","aroused"],
feature_names=X.columns)
plt.figure(figsize=(8,5))
plt.plot(fpr_0, tpr_0,lw=3,label='$GINI_{AUC}$ = %.3f' % (roc_auc_0))
plt.plot(fpr_en, tpr_en,lw=3,label='$ENT_{AUC}$ = %.3f' % (roc_auc_en))
plt.plot(fpr_KNN, tpr_KNN,lw=3,label='$KNN_{AUC}$ = %.3f' % (roc_auc_KNN))
plt.plot(fpr_RF, tpr_RF,lw=3,label='$RAF_{AUC}$ = %.3f' % (roc_auc_RF))
#plt.plot(fpr_c, tpr_c,lw=3,label='$GR_{AUC}$ = %.3f' % (roc_auc_c))
plt.legend(loc="lower right", fontsize=18, frameon=False)
plt.plot([0,1], [0,1], 'k--')
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=22)
plt.show()
"""### Valence"""
refvar="valence"
taglio=0.67
X=df_class_ref.drop(refvar,axis=1).copy()
y=df_class_ref[refvar].copy()
y_up_index = y >= taglio
y[y_up_index]=1
y_zero_index = y < taglio
y[y_zero_index]=0
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
clf_dt = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42)
clf_dt = clf_dt.fit(X_train, y_train)
path = clf_dt.cost_complexity_pruning_path(X_train, y_train)
ccp_alphas = path.ccp_alphas
ccp_alphas = ccp_alphas[:-1]
clf_dts=[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(random_state=0, ccp_alpha=ccp_alpha)
clf_dt.fit(X_train, y_train)
clf_dts.append(clf_dt)
train_scores = [clf_dt.score(X_train, y_train) for clf_dt in clf_dts]
test_scores = [clf_dt.score(X_test, y_test) for clf_dt in clf_dts]
fig, ax =plt.subplots()
ax.set_xlabel("alpha")
ax.set_ylabel("accuracy")
ax.set_title("Accuracy vs alpha for training and testing sets")
ax.plot(ccp_alphas,train_scores, marker ='o',label='train',drawstyle='steps-post')
ax.plot(ccp_alphas,test_scores, marker ='o',label='test',drawstyle='steps-post')
ax.legend()
plt.show()
alpha_loop_values =[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=0, ccp_alpha=ccp_alpha)
scores= cross_val_score(clf_dt,X_train,y_train, cv=10)
alpha_loop_values.append([ccp_alpha,np.mean(scores), np.std(scores)])
alpha_results = pd.DataFrame(alpha_loop_values,
columns=['alpha','mean_accuracy','std'])
alpha_results.plot(x='alpha',
y='mean_accuracy',
marker='o',
linestyle='--')
indexmax = alpha_results[['mean_accuracy']].idxmax()
maxalpha=alpha_results.loc[indexmax,'alpha']
ideal_ccp_alpha = float(maxalpha)
clf_dt_pruned = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42, ccp_alpha=ideal_ccp_alpha)
clf_dt_pruned = clf_dt_pruned.fit(X_train, y_train)
plt.figure(figsize=(15,7.5))
clf_dt_pruned.classes_
from sklearn.tree import plot_tree
plot_tree(clf_dt_pruned,
filled=True,
rounded=True,
class_names=[str(v) for v in clf_dt_pruned.classes_],
feature_names=X.columns)
y_pred = clf_dt_pruned.predict(X_test)
print('Accuracy %s' % accuracy_score(y_test, y_pred))
print('F1-score %s' % f1_score(y_test, y_pred,average='weighted'))
print(classification_report(y_test, y_pred))
y_score = clf_dt_pruned.predict_proba(X_test)
plot_confusion_matrix(clf_dt_pruned,
X_test,
y_test,
display_labels=['not val','val'])
fpr, tpr, th = roc_curve(y_test, y_score[:,1])
roc_auc = auc(fpr, tpr)
print(roc_auc)
plt.figure(figsize=(8,5))
plt.plot(fpr, tpr, label='$AUC$ = %.3f' % (roc_auc))
plt.legend(loc="lower right", fontsize=14, frameon=False)
plt.plot([0,1], [0,1], 'k--')
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=22)
plt.show()
#ROC for Decision Tree (Gini)
fpr_0, tpr_0, th_0 = roc_curve(y_test, y_score[:,1])
roc_auc_0 = auc(fpr_0, tpr_0)
#Entropy
clf_dt = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42)
clf_dt = clf_dt.fit(X_train, y_train)
path = clf_dt.cost_complexity_pruning_path(X_train, y_train)
ccp_alphas = path.ccp_alphas
ccp_alphas = ccp_alphas[:-1]
clf_dts=[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='entropy',random_state=0, ccp_alpha=ccp_alpha)
clf_dt.fit(X_train, y_train)
clf_dts.append(clf_dt)
train_scores = [clf_dt.score(X_train, y_train) for clf_dt in clf_dts]
test_scores = [clf_dt.score(X_test, y_test) for clf_dt in clf_dts]
alpha_loop_values =[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=0, ccp_alpha=ccp_alpha)
scores= cross_val_score(clf_dt,X_train,y_train, cv=10)
alpha_loop_values.append([ccp_alpha,np.mean(scores), np.std(scores)])
alpha_results = pd.DataFrame(alpha_loop_values,
columns=['alpha','mean_accuracy','std'])
indexmax = alpha_results[['mean_accuracy']].idxmax()
maxalpha=alpha_results.loc[indexmax,'alpha']
ideal_ccp_alpha = float(maxalpha)
print(ideal_ccp_alpha)
clf_dt_pruned = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42, ccp_alpha=ideal_ccp_alpha)
clf_dt_pruned = clf_dt_pruned.fit(X_train, y_train)
y_score = clf_dt_pruned.predict_proba(X_test)
fpr_en, tpr_en, th_en = roc_curve(y_test, y_score[:,1])
roc_auc_en = auc(fpr_en, tpr_en)
y_pred = clf_dt_pruned.predict(X_test)
print(classification_report(y_test, y_pred))
#KNN, find best score
acc = []
# Will take some time
for i in range(1,40):
neigh = KNeighborsClassifier(n_neighbors = i).fit(X_train,y_train)
yhat = neigh.predict(X_test)
acc.append(metrics.accuracy_score(y_test, yhat))
# acc[i-1] holds the accuracy for k=i, so the best k is the argmax shifted by one
clf_knn = KNeighborsClassifier(n_neighbors=acc.index(max(acc)) + 1)
clf_knn.fit(X_train, y_train)  # fit on the training split only to avoid leakage
y_score = clf_knn.predict_proba(X_test)
fpr_KNN, tpr_KNN, th_KNN = roc_curve(y_test, y_score[:,1])
roc_auc_KNN = auc(fpr_KNN, tpr_KNN)
y_pred = clf_knn.predict(X_test)
print(classification_report(y_test, y_pred))
# Instantiate model with 380 decision trees
model = RandomForestClassifier(n_estimators = 380, random_state = 42)
# Train the model on training data
ra=model.fit(X_train, y_train)
y_score = model.predict_proba(X_test)
fpr_RF, tpr_RF, th_RF = roc_curve(y_test, y_score[:,1])
roc_auc_RF = auc(fpr_RF, tpr_RF)
y_pred = model.predict(X_test)
print(classification_report(y_test, y_pred))
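# n_estimators = 380 appears to be a hand-picked value. The out-of-bag score
# gives a quick sanity check without touching the test split (a sketch,
# reusing the same training data):
model_oob = RandomForestClassifier(n_estimators=380, oob_score=True, random_state=42)
model_oob.fit(X_train, y_train)
print(model_oob.oob_score_)  # accuracy estimated from out-of-bag samples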
plt.figure(figsize=(8,5))
plt.plot(fpr_0, tpr_0,lw=3,label='$GINI_{AUC}$ = %.3f' % (roc_auc_0))
plt.plot(fpr_en, tpr_en,lw=3,label='$ENT_{AUC}$ = %.3f' % (roc_auc_en))
plt.plot(fpr_KNN, tpr_KNN,lw=3,label='$KNN_{AUC}$ = %.3f' % (roc_auc_KNN))
plt.plot(fpr_RF, tpr_RF,lw=3,label='$RAF_{AUC}$ = %.3f' % (roc_auc_RF))
#plt.plot(fpr_c, tpr_c,lw=3,label='$GR_{AUC}$ = %.3f' % (roc_auc_c))
plt.legend(loc="lower right", fontsize=18, frameon=False)
plt.plot([0,1], [0,1], 'k--')
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=22)
plt.show()
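# The ROC bookkeeping above is repeated verbatim for every model and every
# target. A small helper (sketch, assuming roc_curve and auc are already
# imported) would cut the duplication in the sections below:
def roc_for(clf, X_test, y_test):
    score = clf.predict_proba(X_test)[:, 1]  # probability of the positive class
    fpr, tpr, _ = roc_curve(y_test, score)
    return fpr, tpr, auc(fpr, tpr)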
"""### Dominance
"""
refvar="dominance"
taglio=0.57
X=df_class_ref.drop(refvar,axis=1).copy()
y=df_class_ref[refvar].copy()
y_up_index = y >= taglio
y[y_up_index]=1
y_zero_index = y < taglio
y[y_zero_index]=0
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
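# The binarize-and-split preamble above recurs for every target variable.
# A helper (sketch, assuming df_class_ref and train_test_split are in scope)
# would keep the thresholding logic in one place:
def binarize_and_split(df, refvar, taglio, random_state=42):
    X = df.drop(refvar, axis=1).copy()
    y = (df[refvar] >= taglio).astype(int)  # 1 at or above the cut-off, else 0
    return train_test_split(X, y, random_state=random_state)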
clf_dt = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42)
clf_dt = clf_dt.fit(X_train, y_train)
path = clf_dt.cost_complexity_pruning_path(X_train, y_train)
ccp_alphas = path.ccp_alphas
ccp_alphas = ccp_alphas[:-1]
clf_dts=[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(random_state=0, ccp_alpha=ccp_alpha)
clf_dt.fit(X_train, y_train)
clf_dts.append(clf_dt)
train_scores = [clf_dt.score(X_train, y_train) for clf_dt in clf_dts]
test_scores = [clf_dt.score(X_test, y_test) for clf_dt in clf_dts]
fig, ax =plt.subplots()
ax.set_xlabel("alpha")
ax.set_ylabel("accuracy")
ax.set_title("Accuracy vs alpha for training and testing sets")
ax.plot(ccp_alphas,train_scores, marker ='o',label='train',drawstyle='steps-post')
ax.plot(ccp_alphas,test_scores, marker ='o',label='test',drawstyle='steps-post')
ax.legend()
plt.show()
alpha_loop_values =[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(random_state=42, ccp_alpha=ccp_alpha)
scores= cross_val_score(clf_dt,X_train,y_train, cv=10)
alpha_loop_values.append([ccp_alpha,np.mean(scores), np.std(scores)])
alpha_results = pd.DataFrame(alpha_loop_values,
columns=['alpha','mean_accuracy','std'])
alpha_results.plot(x='alpha',
y='mean_accuracy',
marker='o',
linestyle='--')
indexmax = alpha_results[['mean_accuracy']].idxmax()
maxalpha=alpha_results.loc[indexmax,'alpha']
ideal_ccp_alpha = float(maxalpha)
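# Equivalent one-liner for the selection above (a sketch; idxmax on the
# column avoids the intermediate Series):
# ideal_ccp_alpha = float(alpha_results.loc[alpha_results['mean_accuracy'].idxmax(), 'alpha'])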
clf_dt_pruned = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42, ccp_alpha=ideal_ccp_alpha)
clf_dt_pruned = clf_dt_pruned.fit(X_train, y_train)
plot_confusion_matrix(clf_dt_pruned,
X_test,
y_test,
display_labels=['not dominant','dominant'])
plt.figure(figsize=(15,7.5))
from sklearn.tree import plot_tree
plot_tree(clf_dt_pruned,
filled=True,
rounded=True,
class_names=["not dominant","dominant"],
feature_names=X.columns)
y_pred = clf_dt_pruned.predict(X_test)  # evaluate on the held-out test split
print('Accuracy %s' % accuracy_score(y_test, y_pred))
print('F1-score %s' % f1_score(y_test, y_pred))
print(classification_report(y_test, y_pred))
y_score = clf_dt_pruned.predict_proba(X_test)
fpr, tpr, th = roc_curve(y_test, y_score[:,1])
roc_auc = auc(fpr, tpr)
print(roc_auc)
plt.figure(figsize=(8,5))
plt.plot(fpr, tpr, label='$AUC$ = %.3f' % (roc_auc))
plt.legend(loc="lower right", fontsize=14, frameon=False)
plt.plot([0,1], [0,1], 'k--')
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=22)
plt.show()
#ROC for Decision Tree (Gini)
fpr_0, tpr_0, th_0 = roc_curve(y_test, y_score[:,1])
roc_auc_0 = auc(fpr_0, tpr_0)
#Entropy
clf_dt = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42)
clf_dt = clf_dt.fit(X_train, y_train)
path = clf_dt.cost_complexity_pruning_path(X_train, y_train)
ccp_alphas = path.ccp_alphas
ccp_alphas = ccp_alphas[:-1]
clf_dts=[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='entropy',random_state=0, ccp_alpha=ccp_alpha)
clf_dt.fit(X_train, y_train)
clf_dts.append(clf_dt)
train_scores = [clf_dt.score(X_train, y_train) for clf_dt in clf_dts]
test_scores = [clf_dt.score(X_test, y_test) for clf_dt in clf_dts]
alpha_loop_values =[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=0, ccp_alpha=ccp_alpha)
scores= cross_val_score(clf_dt,X_train,y_train, cv=10)
alpha_loop_values.append([ccp_alpha,np.mean(scores), np.std(scores)])
alpha_results = pd.DataFrame(alpha_loop_values,
columns=['alpha','mean_accuracy','std'])
indexmax = alpha_results[['mean_accuracy']].idxmax()
maxalpha=alpha_results.loc[indexmax,'alpha']
ideal_ccp_alpha = float(maxalpha)
print(ideal_ccp_alpha)
clf_dt_pruned = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42, ccp_alpha=ideal_ccp_alpha)
clf_dt_pruned = clf_dt_pruned.fit(X_train, y_train)
y_score = clf_dt_pruned.predict_proba(X_test)
fpr_en, tpr_en, th_en = roc_curve(y_test, y_score[:,1])
roc_auc_en = auc(fpr_en, tpr_en)
y_pred = clf_dt_pruned.predict(X_test)
print(classification_report(y_test, y_pred))
#KNN, find best score
acc = []
# Will take some time
for i in range(1,40):
neigh = KNeighborsClassifier(n_neighbors = i).fit(X_train,y_train)
yhat = neigh.predict(X_test)
acc.append(metrics.accuracy_score(y_test, yhat))
best_k = acc.index(max(acc)) + 1  # acc[i] holds the score for k = i + 1
clf_knn = KNeighborsClassifier(n_neighbors=best_k)
clf_knn.fit(X_train, y_train)  # fit on the training split only, avoiding test-set leakage
y_score = clf_knn.predict_proba(X_test)
fpr_KNN, tpr_KNN, th_KNN = roc_curve(y_test, y_score[:,1])
roc_auc_KNN = auc(fpr_KNN, tpr_KNN)
y_pred = clf_knn.predict(X_test)
print(classification_report(y_test, y_pred))
# Instantiate model with 380 decision trees
model = RandomForestClassifier(n_estimators = 380, random_state = 42)
# Train the model on training data
ra=model.fit(X_train, y_train)
y_score = model.predict_proba(X_test)
fpr_RF, tpr_RF, th_RF = roc_curve(y_test, y_score[:,1])
roc_auc_RF = auc(fpr_RF, tpr_RF)
y_pred = model.predict(X_test)
print(classification_report(y_test, y_pred))
plt.figure(figsize=(8,5))
plt.plot(fpr_0, tpr_0,lw=3,label='$GINI_{AUC}$ = %.3f' % (roc_auc_0))
plt.plot(fpr_en, tpr_en,lw=3,label='$ENT_{AUC}$ = %.3f' % (roc_auc_en))
plt.plot(fpr_KNN, tpr_KNN,lw=3,label='$KNN_{AUC}$ = %.3f' % (roc_auc_KNN))
plt.plot(fpr_RF, tpr_RF,lw=3,label='$RAF_{AUC}$ = %.3f' % (roc_auc_RF))
#plt.plot(fpr_c, tpr_c,lw=3,label='$GR_{AUC}$ = %.3f' % (roc_auc_c))
plt.legend(loc="lower right", fontsize=18, frameon=False)
plt.plot([0,1], [0,1], 'k--')
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=22)
plt.show()
"""### Familiarity"""
refvar="familiarity"
taglio=0.6
X=df_class_ref.drop(refvar,axis=1).copy()
y=df_class_ref[refvar].copy()
y_up_index = y >= taglio
y[y_up_index]=1
y_zero_index = y < taglio
y[y_zero_index]=0
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
clf_dt = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42)
clf_dt = clf_dt.fit(X_train, y_train)
path = clf_dt.cost_complexity_pruning_path(X_train, y_train)
ccp_alphas = path.ccp_alphas
ccp_alphas = ccp_alphas[:-1]
clf_dts=[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(random_state=0, ccp_alpha=ccp_alpha)
clf_dt.fit(X_train, y_train)
clf_dts.append(clf_dt)
train_scores = [clf_dt.score(X_train, y_train) for clf_dt in clf_dts]
test_scores = [clf_dt.score(X_test, y_test) for clf_dt in clf_dts]
fig, ax =plt.subplots()
ax.set_xlabel("alpha")
ax.set_ylabel("accuracy")
ax.set_title("Accuracy vs alpha for training and testing sets")
ax.plot(ccp_alphas,train_scores, marker ='o',label='train',drawstyle='steps-post')
ax.plot(ccp_alphas,test_scores, marker ='o',label='test',drawstyle='steps-post')
ax.legend()
plt.show()
alpha_loop_values =[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=0, ccp_alpha=ccp_alpha)
scores= cross_val_score(clf_dt,X_train,y_train, cv=10)
alpha_loop_values.append([ccp_alpha,np.mean(scores), np.std(scores)])
alpha_results = pd.DataFrame(alpha_loop_values,
columns=['alpha','mean_accuracy','std'])
alpha_results.plot(x='alpha',
y='mean_accuracy',
marker='o',
linestyle='--')
indexmax = alpha_results[['mean_accuracy']].idxmax()
maxalpha=alpha_results.loc[indexmax,'alpha']
ideal_ccp_alpha = float(maxalpha)
clf_dt_pruned = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42, ccp_alpha=ideal_ccp_alpha)
clf_dt_pruned = clf_dt_pruned.fit(X_train, y_train)
plot_confusion_matrix(clf_dt_pruned,
X_test,
y_test,
display_labels=['not dominant','dominant'])
plt.figure(figsize=(15,7.5))
from sklearn.tree import plot_tree
plot_tree(clf_dt_pruned,
filled=True,
rounded=True,
class_names=["not familiar","familiar"],
feature_names=X.columns,
max_depth=3,
fontsize=7)
plt.savefig('plot_of_tree.pdf')  # save the tree figure drawn above
y_pred = clf_dt_pruned.predict(X_test)  # evaluate on the held-out test split
print('Accuracy %s' % accuracy_score(y_test, y_pred))
print('F1-score %s' % f1_score(y_test, y_pred))
print(classification_report(y_test, y_pred))
y_score = clf_dt_pruned.predict_proba(X_test)
fpr, tpr, th = roc_curve(y_test, y_score[:,1])
roc_auc = auc(fpr, tpr)
print(roc_auc)
plt.figure(figsize=(8,5))
plt.plot(fpr, tpr, label='$AUC$ = %.3f' % (roc_auc))
plt.legend(loc="lower right", fontsize=14, frameon=False)
plt.plot([0,1], [0,1], 'k--')
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=22)
plt.show()
#ROC for Decision Tree (Gini)
fpr_0, tpr_0, th_0 = roc_curve(y_test, y_score[:,1])
roc_auc_0 = auc(fpr_0, tpr_0)
#Entropy
clf_dt = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42)
clf_dt = clf_dt.fit(X_train, y_train)
path = clf_dt.cost_complexity_pruning_path(X_train, y_train)
ccp_alphas = path.ccp_alphas
ccp_alphas = ccp_alphas[:-1]
clf_dts=[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='entropy',random_state=0, ccp_alpha=ccp_alpha)
clf_dt.fit(X_train, y_train)
clf_dts.append(clf_dt)
train_scores = [clf_dt.score(X_train, y_train) for clf_dt in clf_dts]
test_scores = [clf_dt.score(X_test, y_test) for clf_dt in clf_dts]
alpha_loop_values =[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=0, ccp_alpha=ccp_alpha)
scores= cross_val_score(clf_dt,X_train,y_train, cv=10)
alpha_loop_values.append([ccp_alpha,np.mean(scores), np.std(scores)])
alpha_results = pd.DataFrame(alpha_loop_values,
columns=['alpha','mean_accuracy','std'])
indexmax = alpha_results[['mean_accuracy']].idxmax()
maxalpha=alpha_results.loc[indexmax,'alpha']
ideal_ccp_alpha = float(maxalpha)
print(ideal_ccp_alpha)
clf_dt_pruned = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42, ccp_alpha=ideal_ccp_alpha)
clf_dt_pruned = clf_dt_pruned.fit(X_train, y_train)
y_score = clf_dt_pruned.predict_proba(X_test)
fpr_en, tpr_en, th_en = roc_curve(y_test, y_score[:,1])
roc_auc_en = auc(fpr_en, tpr_en)
y_pred = clf_dt_pruned.predict(X_test)
print(classification_report(y_test, y_pred))
plt.figure(figsize=(15,7.5))
from sklearn.tree import plot_tree
plot_tree(clf_dt_pruned,
filled=True,
rounded=True,
class_names=["not familiar","familiar"],
feature_names=X.columns,
max_depth=3,
fontsize=7)
#KNN, find best score
acc = []
# Will take some time
for i in range(1,40):
neigh = KNeighborsClassifier(n_neighbors = i).fit(X_train,y_train)
yhat = neigh.predict(X_test)
acc.append(metrics.accuracy_score(y_test, yhat))
best_k = acc.index(max(acc)) + 1  # acc[i] holds the score for k = i + 1
clf_knn = KNeighborsClassifier(n_neighbors=best_k)
clf_knn.fit(X_train, y_train)  # fit on the training split only, avoiding test-set leakage
y_score = clf_knn.predict_proba(X_test)
fpr_KNN, tpr_KNN, th_KNN = roc_curve(y_test, y_score[:,1])
roc_auc_KNN = auc(fpr_KNN, tpr_KNN)
y_pred = clf_knn.predict(X_test)
print(classification_report(y_test, y_pred))
# Instantiate model with 380 decision trees
model = RandomForestClassifier(n_estimators = 380, random_state = 42)
# Train the model on training data
ra=model.fit(X_train, y_train)
y_score = model.predict_proba(X_test)
fpr_RF, tpr_RF, th_RF = roc_curve(y_test, y_score[:,1])
roc_auc_RF = auc(fpr_RF, tpr_RF)
y_pred = model.predict(X_test)
print(classification_report(y_test, y_pred))
plt.figure(figsize=(8,5))
plt.plot(fpr_0, tpr_0,lw=3,label='$GINI_{AUC}$ = %.3f' % (roc_auc_0))
plt.plot(fpr_en, tpr_en,lw=3,label='$ENT_{AUC}$ = %.3f' % (roc_auc_en))
plt.plot(fpr_KNN, tpr_KNN,lw=3,label='$KNN_{AUC}$ = %.3f' % (roc_auc_KNN))
plt.plot(fpr_RF, tpr_RF,lw=3,label='$RAF_{AUC}$ = %.3f' % (roc_auc_RF))
#plt.plot(fpr_c, tpr_c,lw=3,label='$GR_{AUC}$ = %.3f' % (roc_auc_c))
plt.legend(loc="lower right", fontsize=18, frameon=False)
plt.plot([0,1], [0,1], 'k--')
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=22)
plt.show()
"""### Semsize"""
refvar="semsize"
taglio=0.63
X=df_class_ref.drop(refvar,axis=1).copy()
y=df_class_ref[refvar].copy()
y_up_index = y >= taglio
y[y_up_index]=1
y_zero_index = y < taglio
y[y_zero_index]=0
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
clf_dt = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42)
clf_dt = clf_dt.fit(X_train, y_train)
path = clf_dt.cost_complexity_pruning_path(X_train, y_train)
ccp_alphas = path.ccp_alphas
ccp_alphas = ccp_alphas[:-1]
clf_dts=[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(random_state=0, ccp_alpha=ccp_alpha)
clf_dt.fit(X_train, y_train)
clf_dts.append(clf_dt)
train_scores = [clf_dt.score(X_train, y_train) for clf_dt in clf_dts]
test_scores = [clf_dt.score(X_test, y_test) for clf_dt in clf_dts]
fig, ax =plt.subplots()
ax.set_xlabel("alpha")
ax.set_ylabel("accuracy")
ax.set_title("Accuracy vs alpha for training and testing sets")
ax.plot(ccp_alphas,train_scores, marker ='o',label='train',drawstyle='steps-post')
ax.plot(ccp_alphas,test_scores, marker ='o',label='test',drawstyle='steps-post')
ax.legend()
plt.show()
alpha_loop_values =[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=0, ccp_alpha=ccp_alpha)
scores= cross_val_score(clf_dt,X_train,y_train, cv=10)
alpha_loop_values.append([ccp_alpha,np.mean(scores), np.std(scores)])
alpha_results = pd.DataFrame(alpha_loop_values,
columns=['alpha','mean_accuracy','std'])
alpha_results.plot(x='alpha',
y='mean_accuracy',
marker='o',
linestyle='--')
indexmax = alpha_results[['mean_accuracy']].idxmax()
maxalpha=alpha_results.loc[indexmax,'alpha']
ideal_ccp_alpha = float(maxalpha)
clf_dt_pruned = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42, ccp_alpha=ideal_ccp_alpha)
clf_dt_pruned = clf_dt_pruned.fit(X_train, y_train)
plot_confusion_matrix(clf_dt_pruned,
X_test,
y_test,
display_labels=['small','big'])
plt.figure(figsize=(15,7.5))
from sklearn.tree import plot_tree
plot_tree(clf_dt_pruned,
filled=True,
rounded=True,
class_names=["small","big"],
feature_names=X.columns)
y_pred = clf_dt_pruned.predict(X_test)  # evaluate on the held-out test split
print('Accuracy %s' % accuracy_score(y_test, y_pred))
print('F1-score %s' % f1_score(y_test, y_pred, average=None))
print(classification_report(y_test, y_pred))
y_score = clf_dt_pruned.predict_proba(X_test)
fpr, tpr, th = roc_curve(y_test, y_score[:,1])
roc_auc = auc(fpr, tpr)
print(roc_auc)
plt.figure(figsize=(8,5))
plt.plot(fpr, tpr, label='$AUC$ = %.3f' % (roc_auc))
plt.legend(loc="lower right", fontsize=14, frameon=False)
plt.plot([0,1], [0,1], 'k--')
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=22)
plt.show()
#ROC for Decision Tree (Gini)
fpr_0, tpr_0, th_0 = roc_curve(y_test, y_score[:,1])
roc_auc_0 = auc(fpr_0, tpr_0)
#Entropy
clf_dt = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42)
clf_dt = clf_dt.fit(X_train, y_train)
path = clf_dt.cost_complexity_pruning_path(X_train, y_train)
ccp_alphas = path.ccp_alphas
ccp_alphas = ccp_alphas[:-1]
clf_dts=[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='entropy',random_state=0, ccp_alpha=ccp_alpha)
clf_dt.fit(X_train, y_train)
clf_dts.append(clf_dt)
train_scores = [clf_dt.score(X_train, y_train) for clf_dt in clf_dts]
test_scores = [clf_dt.score(X_test, y_test) for clf_dt in clf_dts]
alpha_loop_values =[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=0, ccp_alpha=ccp_alpha)
scores= cross_val_score(clf_dt,X_train,y_train, cv=10)
alpha_loop_values.append([ccp_alpha,np.mean(scores), np.std(scores)])
alpha_results = pd.DataFrame(alpha_loop_values,
columns=['alpha','mean_accuracy','std'])
indexmax = alpha_results[['mean_accuracy']].idxmax()
maxalpha=alpha_results.loc[indexmax,'alpha']
ideal_ccp_alpha = float(maxalpha)
print(ideal_ccp_alpha)
clf_dt_pruned = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42, ccp_alpha=ideal_ccp_alpha)
clf_dt_pruned = clf_dt_pruned.fit(X_train, y_train)
y_score = clf_dt_pruned.predict_proba(X_test)
fpr_en, tpr_en, th_en = roc_curve(y_test, y_score[:,1])
roc_auc_en = auc(fpr_en, tpr_en)
y_pred = clf_dt_pruned.predict(X_test)
print(classification_report(y_test, y_pred))
#KNN, find best score
acc = []
# Will take some time
for i in range(1,40):
neigh = KNeighborsClassifier(n_neighbors = i).fit(X_train,y_train)
yhat = neigh.predict(X_test)
acc.append(metrics.accuracy_score(y_test, yhat))
best_k = acc.index(max(acc)) + 1  # acc[i] holds the score for k = i + 1
clf_knn = KNeighborsClassifier(n_neighbors=best_k)
clf_knn.fit(X_train, y_train)  # fit on the training split only, avoiding test-set leakage
y_score = clf_knn.predict_proba(X_test)
fpr_KNN, tpr_KNN, th_KNN = roc_curve(y_test, y_score[:,1])
roc_auc_KNN = auc(fpr_KNN, tpr_KNN)
y_pred = clf_knn.predict(X_test)
print(classification_report(y_test, y_pred))
# Instantiate model with 380 decision trees
model = RandomForestClassifier(n_estimators = 380, random_state = 42)
# Train the model on training data
ra=model.fit(X_train, y_train)
y_score = model.predict_proba(X_test)
fpr_RF, tpr_RF, th_RF = roc_curve(y_test, y_score[:,1])
roc_auc_RF = auc(fpr_RF, tpr_RF)
y_pred = model.predict(X_test)
print(classification_report(y_test, y_pred))
plt.figure(figsize=(8,5))
plt.plot(fpr_0, tpr_0,lw=3,label='$GINI_{AUC}$ = %.3f' % (roc_auc_0))
plt.plot(fpr_en, tpr_en,lw=3,label='$ENT_{AUC}$ = %.3f' % (roc_auc_en))
plt.plot(fpr_KNN, tpr_KNN,lw=3,label='$KNN_{AUC}$ = %.3f' % (roc_auc_KNN))
plt.plot(fpr_RF, tpr_RF,lw=3,label='$RAF_{AUC}$ = %.3f' % (roc_auc_RF))
#plt.plot(fpr_c, tpr_c,lw=3,label='$GR_{AUC}$ = %.3f' % (roc_auc_c))
plt.legend(loc="lower right", fontsize=18, frameon=False)
plt.plot([0,1], [0,1], 'k--')
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=22)
plt.show()
"""### Masculinity"""
refvar="masculinity"
taglio=0.6
X=df_class_ref.drop(refvar,axis=1).copy()
y=df_class_ref[refvar].copy()
y_up_index = y >= taglio
y[y_up_index]=1
y_zero_index = y < taglio
y[y_zero_index]=0
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
clf_dt = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42)
clf_dt = clf_dt.fit(X_train, y_train)
path = clf_dt.cost_complexity_pruning_path(X_train, y_train)
ccp_alphas = path.ccp_alphas
ccp_alphas = ccp_alphas[:-1]
clf_dts=[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(random_state=0, ccp_alpha=ccp_alpha)
clf_dt.fit(X_train, y_train)
clf_dts.append(clf_dt)
train_scores = [clf_dt.score(X_train, y_train) for clf_dt in clf_dts]
test_scores = [clf_dt.score(X_test, y_test) for clf_dt in clf_dts]
fig, ax =plt.subplots()
ax.set_xlabel("alpha")
ax.set_ylabel("accuracy")
ax.set_title("Accuracy vs alpha for training and testing sets")
ax.plot(ccp_alphas,train_scores, marker ='o',label='train',drawstyle='steps-post')
ax.plot(ccp_alphas,test_scores, marker ='o',label='test',drawstyle='steps-post')
ax.legend()
plt.show()
alpha_loop_values =[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=0, ccp_alpha=ccp_alpha)
scores= cross_val_score(clf_dt,X_train,y_train, cv=10)
alpha_loop_values.append([ccp_alpha,np.mean(scores), np.std(scores)])
alpha_results = pd.DataFrame(alpha_loop_values,
columns=['alpha','mean_accuracy','std'])
alpha_results.plot(x='alpha',
y='mean_accuracy',
marker='o',
linestyle='--')
indexmax = alpha_results[['mean_accuracy']].idxmax()
maxalpha=alpha_results.loc[indexmax,'alpha']
ideal_ccp_alpha = float(maxalpha)
clf_dt_pruned = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42, ccp_alpha=ideal_ccp_alpha)
clf_dt_pruned = clf_dt_pruned.fit(X_train, y_train)
plot_confusion_matrix(clf_dt_pruned,
X_test,
y_test,
display_labels=['feminine','masculine'])
plt.figure(figsize=(15,7.5))
from sklearn.tree import plot_tree
plot_tree(clf_dt_pruned,
filled=True,
rounded=True,
class_names=["feminine","masculine"],
feature_names=X.columns)
y_pred = clf_dt_pruned.predict(X_test)  # evaluate on the held-out test split
print('Accuracy %s' % accuracy_score(y_test, y_pred))
print('F1-score %s' % f1_score(y_test, y_pred))
print(classification_report(y_test, y_pred))
y_score = clf_dt_pruned.predict_proba(X_test)
fpr, tpr, th = roc_curve(y_test, y_score[:,1])
roc_auc = auc(fpr, tpr)
print(roc_auc)
plt.figure(figsize=(8,5))
plt.plot(fpr, tpr, label='$AUC$ = %.3f' % (roc_auc))
plt.legend(loc="lower right", fontsize=14, frameon=False)
plt.plot([0,1], [0,1], 'k--')
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=22)
plt.show()
#ROC for Decision Tree (Gini)
fpr_0, tpr_0, th_0 = roc_curve(y_test, y_score[:,1])
roc_auc_0 = auc(fpr_0, tpr_0)
#Entropy
clf_dt = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42)
clf_dt = clf_dt.fit(X_train, y_train)
path = clf_dt.cost_complexity_pruning_path(X_train, y_train)
ccp_alphas = path.ccp_alphas
ccp_alphas = ccp_alphas[:-1]
clf_dts=[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='entropy',random_state=0, ccp_alpha=ccp_alpha)
clf_dt.fit(X_train, y_train)
clf_dts.append(clf_dt)
train_scores = [clf_dt.score(X_train, y_train) for clf_dt in clf_dts]
test_scores = [clf_dt.score(X_test, y_test) for clf_dt in clf_dts]
alpha_loop_values =[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=0, ccp_alpha=ccp_alpha)
scores= cross_val_score(clf_dt,X_train,y_train, cv=10)
alpha_loop_values.append([ccp_alpha,np.mean(scores), np.std(scores)])
alpha_results = pd.DataFrame(alpha_loop_values,
columns=['alpha','mean_accuracy','std'])
indexmax = alpha_results[['mean_accuracy']].idxmax()
maxalpha=alpha_results.loc[indexmax,'alpha']
ideal_ccp_alpha = float(maxalpha)
print(ideal_ccp_alpha)
clf_dt_pruned = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42, ccp_alpha=ideal_ccp_alpha)
clf_dt_pruned = clf_dt_pruned.fit(X_train, y_train)
y_score = clf_dt_pruned.predict_proba(X_test)
fpr_en, tpr_en, th_en = roc_curve(y_test, y_score[:,1])
roc_auc_en = auc(fpr_en, tpr_en)
y_pred = clf_dt_pruned.predict(X_test)
print(classification_report(y_test, y_pred))
#KNN, find best score
acc = []
# Will take some time
for i in range(1,40):
neigh = KNeighborsClassifier(n_neighbors = i).fit(X_train,y_train)
yhat = neigh.predict(X_test)
acc.append(metrics.accuracy_score(y_test, yhat))
best_k = acc.index(max(acc)) + 1  # acc[i] holds the score for k = i + 1
clf_knn = KNeighborsClassifier(n_neighbors=best_k)
clf_knn.fit(X_train, y_train)  # fit on the training split only, avoiding test-set leakage
y_score = clf_knn.predict_proba(X_test)
fpr_KNN, tpr_KNN, th_KNN = roc_curve(y_test, y_score[:,1])
roc_auc_KNN = auc(fpr_KNN, tpr_KNN)
y_pred = clf_knn.predict(X_test)
print(classification_report(y_test, y_pred))
# Instantiate model with 380 decision trees
model = RandomForestClassifier(n_estimators = 380, random_state = 42)
# Train the model on training data
ra=model.fit(X_train, y_train)
y_score = model.predict_proba(X_test)
fpr_RF, tpr_RF, th_RF = roc_curve(y_test, y_score[:,1])
roc_auc_RF = auc(fpr_RF, tpr_RF)
y_pred = model.predict(X_test)
print(classification_report(y_test, y_pred))
plt.figure(figsize=(8,5))
plt.plot(fpr_0, tpr_0,lw=3,label='$GINI_{AUC}$ = %.3f' % (roc_auc_0))
plt.plot(fpr_en, tpr_en,lw=3,label='$ENT_{AUC}$ = %.3f' % (roc_auc_en))
plt.plot(fpr_KNN, tpr_KNN,lw=3,label='$KNN_{AUC}$ = %.3f' % (roc_auc_KNN))
plt.plot(fpr_RF, tpr_RF,lw=3,label='$RAF_{AUC}$ = %.3f' % (roc_auc_RF))
#plt.plot(fpr_c, tpr_c,lw=3,label='$GR_{AUC}$ = %.3f' % (roc_auc_c))
plt.legend(loc="lower right", fontsize=18, frameon=False)
plt.plot([0,1], [0,1], 'k--')
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=22)
plt.show()
"""### Polysemy"""
refvar="polysemy"
taglio=0.63
X=df_class_ref.drop(refvar,axis=1).copy()
y=df_class_ref[refvar].copy()
y_up_index = y >= taglio
y[y_up_index]=1
y_zero_index = y < taglio
y[y_zero_index]=0
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
clf_dt = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42)
clf_dt = clf_dt.fit(X_train, y_train)
path = clf_dt.cost_complexity_pruning_path(X_train, y_train)
ccp_alphas = path.ccp_alphas
ccp_alphas = ccp_alphas[:-1]
clf_dts=[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(random_state=0, ccp_alpha=ccp_alpha)
clf_dt.fit(X_train, y_train)
clf_dts.append(clf_dt)
train_scores = [clf_dt.score(X_train, y_train) for clf_dt in clf_dts]
test_scores = [clf_dt.score(X_test, y_test) for clf_dt in clf_dts]
fig, ax =plt.subplots()
ax.set_xlabel("alpha")
ax.set_ylabel("accuracy")
ax.set_title("Accuracy vs alpha for training and testing sets")
ax.plot(ccp_alphas,train_scores, marker ='o',label='train',drawstyle='steps-post')
ax.plot(ccp_alphas,test_scores, marker ='o',label='test',drawstyle='steps-post')
ax.legend()
plt.show()
alpha_loop_values =[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=0, ccp_alpha=ccp_alpha)
scores= cross_val_score(clf_dt,X_train,y_train, cv=10)
alpha_loop_values.append([ccp_alpha,np.mean(scores), np.std(scores)])
alpha_results = pd.DataFrame(alpha_loop_values,
columns=['alpha','mean_accuracy','std'])
alpha_results.plot(x='alpha',
y='mean_accuracy',
marker='o',
linestyle='--')
indexmax = alpha_results[['mean_accuracy']].idxmax()
maxalpha=alpha_results.loc[indexmax,'alpha']
ideal_ccp_alpha = float(maxalpha)
clf_dt_pruned = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42, ccp_alpha=ideal_ccp_alpha)
clf_dt_pruned = clf_dt_pruned.fit(X_train, y_train)
plot_confusion_matrix(clf_dt_pruned,
X_test,
y_test,
display_labels=['not pol','pol'])
plt.figure(figsize=(15,7.5))
from sklearn.tree import plot_tree
plot_tree(clf_dt_pruned,
filled=True,
rounded=True,
class_names=["not pol","pol"],
feature_names=X.columns)
y_pred = clf_dt_pruned.predict(X_test)  # evaluate on the held-out test split
print('Accuracy %s' % accuracy_score(y_test, y_pred))
print('F1-score %s' % f1_score(y_test, y_pred,average='weighted'))
print(classification_report(y_test, y_pred))
y_score = clf_dt_pruned.predict_proba(X_test)
fpr, tpr, th = roc_curve(y_test, y_score[:,1])
roc_auc = auc(fpr, tpr)
print(roc_auc)
plt.figure(figsize=(8,5))
plt.plot(fpr, tpr, label='$AUC$ = %.3f' % (roc_auc))
plt.legend(loc="lower right", fontsize=14, frameon=False)
plt.plot([0,1], [0,1], 'k--')
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=22)
plt.show()
#ROC for Decision Tree (Gini)
fpr_0, tpr_0, th_0 = roc_curve(y_test, y_score[:,1])
roc_auc_0 = auc(fpr_0, tpr_0)
#Entropy
clf_dt = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42)
clf_dt = clf_dt.fit(X_train, y_train)
path = clf_dt.cost_complexity_pruning_path(X_train, y_train)
ccp_alphas = path.ccp_alphas
ccp_alphas = ccp_alphas[:-1]
clf_dts=[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='entropy',random_state=0, ccp_alpha=ccp_alpha)
clf_dt.fit(X_train, y_train)
clf_dts.append(clf_dt)
train_scores = [clf_dt.score(X_train, y_train) for clf_dt in clf_dts]
test_scores = [clf_dt.score(X_test, y_test) for clf_dt in clf_dts]
alpha_loop_values =[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=0, ccp_alpha=ccp_alpha)
scores= cross_val_score(clf_dt,X_train,y_train, cv=10)
alpha_loop_values.append([ccp_alpha,np.mean(scores), np.std(scores)])
alpha_results = pd.DataFrame(alpha_loop_values,
columns=['alpha','mean_accuracy','std'])
indexmax = alpha_results[['mean_accuracy']].idxmax()
maxalpha=alpha_results.loc[indexmax,'alpha']
ideal_ccp_alpha = float(maxalpha)
print(ideal_ccp_alpha)
clf_dt_pruned = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42, ccp_alpha=ideal_ccp_alpha)
clf_dt_pruned = clf_dt_pruned.fit(X_train, y_train)
y_score = clf_dt_pruned.predict_proba(X_test)
fpr_en, tpr_en, th_en = roc_curve(y_test, y_score[:,1])
roc_auc_en = auc(fpr_en, tpr_en)
y_pred = clf_dt_pruned.predict(X_test)
print(classification_report(y_test, y_pred))
#KNN, find best score
acc = []
# Will take some time
for i in range(1,40):
neigh = KNeighborsClassifier(n_neighbors = i).fit(X_train,y_train)
yhat = neigh.predict(X_test)
acc.append(metrics.accuracy_score(y_test, yhat))
best_k = acc.index(max(acc)) + 1  # acc[i] holds the score for k = i + 1
clf_knn = KNeighborsClassifier(n_neighbors=best_k)
clf_knn.fit(X_train, y_train)  # fit on the training split only, avoiding test-set leakage
y_score = clf_knn.predict_proba(X_test)
fpr_KNN, tpr_KNN, th_KNN = roc_curve(y_test, y_score[:,1])
roc_auc_KNN = auc(fpr_KNN, tpr_KNN)
y_pred = clf_knn.predict(X_test)
print(classification_report(y_test, y_pred))
# Instantiate model with 380 decision trees
model = RandomForestClassifier(n_estimators = 380, random_state = 42)
# Train the model on training data
ra=model.fit(X_train, y_train)
y_score = model.predict_proba(X_test)
fpr_RF, tpr_RF, th_RF = roc_curve(y_test, y_score[:,1])
roc_auc_RF = auc(fpr_RF, tpr_RF)
y_pred = model.predict(X_test)
print(classification_report(y_test, y_pred))
plt.figure(figsize=(8,5))
plt.plot(fpr_0, tpr_0,lw=3,label='$GINI_{AUC}$ = %.3f' % (roc_auc_0))
plt.plot(fpr_en, tpr_en,lw=3,label='$ENT_{AUC}$ = %.3f' % (roc_auc_en))
plt.plot(fpr_KNN, tpr_KNN,lw=3,label='$KNN_{AUC}$ = %.3f' % (roc_auc_KNN))
plt.plot(fpr_RF, tpr_RF,lw=3,label='$RAF_{AUC}$ = %.3f' % (roc_auc_RF))
#plt.plot(fpr_c, tpr_c,lw=3,label='$GR_{AUC}$ = %.3f' % (roc_auc_c))
plt.legend(loc="lower right", fontsize=18, frameon=False)
plt.plot([0,1], [0,1], 'k--')
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=22)
plt.show()
"""### Perceivability"""
refvar="perceivability"
taglio=0.8
X=df_class_ref.drop(refvar,axis=1).copy()
y=df_class_ref[refvar].copy()
y_up_index = y >= taglio
y[y_up_index]=1
y_zero_index = y < taglio
y[y_zero_index]=0
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
clf_dt = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42)
clf_dt = clf_dt.fit(X_train, y_train)
path = clf_dt.cost_complexity_pruning_path(X_train, y_train)
ccp_alphas = path.ccp_alphas
ccp_alphas = ccp_alphas[:-1]
clf_dts=[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(random_state=0, ccp_alpha=ccp_alpha)
clf_dt.fit(X_train, y_train)
clf_dts.append(clf_dt)
train_scores = [clf_dt.score(X_train, y_train) for clf_dt in clf_dts]
test_scores = [clf_dt.score(X_test, y_test) for clf_dt in clf_dts]
fig, ax =plt.subplots()
ax.set_xlabel("alpha")
ax.set_ylabel("accuracy")
ax.set_title("Accuracy vs alpha for training and testing sets")
ax.plot(ccp_alphas,train_scores, marker ='o',label='train',drawstyle='steps-post')
ax.plot(ccp_alphas,test_scores, marker ='o',label='test',drawstyle='steps-post')
ax.legend()
plt.show()
alpha_loop_values =[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=0, ccp_alpha=ccp_alpha)
scores= cross_val_score(clf_dt,X_train,y_train, cv=10)
alpha_loop_values.append([ccp_alpha,np.mean(scores), np.std(scores)])
alpha_results = pd.DataFrame(alpha_loop_values,
columns=['alpha','mean_accuracy','std'])
alpha_results.plot(x='alpha',
y='mean_accuracy',
marker='o',
linestyle='--')
indexmax = alpha_results[['mean_accuracy']].idxmax()
maxalpha=alpha_results.loc[indexmax,'alpha']
ideal_ccp_alpha = float(maxalpha)
clf_dt_pruned = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42, ccp_alpha=ideal_ccp_alpha)
clf_dt_pruned = clf_dt_pruned.fit(X_train, y_train)
plot_confusion_matrix(clf_dt_pruned,
X_test,
y_test,
display_labels=['not perceivable','perceivable'])
plt.figure(figsize=(15,7.5))
from sklearn.tree import plot_tree
plot_tree(clf_dt_pruned,
filled=True,
rounded=True,
class_names=["not perceivable","perceivable"],
feature_names=X.columns)
y_pred = clf_dt_pruned.predict(X_test)  # evaluate on the held-out test split
print('Accuracy %s' % accuracy_score(y_test, y_pred))
print('F1-score %s' % f1_score(y_test, y_pred))
print(classification_report(y_test, y_pred))
y_score = clf_dt_pruned.predict_proba(X_test)
fpr, tpr, th = roc_curve(y_test, y_score[:,1])
roc_auc = auc(fpr, tpr)
print(roc_auc)
plt.figure(figsize=(8,5))
plt.plot(fpr, tpr, label='$AUC$ = %.3f' % (roc_auc))
plt.legend(loc="lower right", fontsize=14, frameon=False)
plt.plot([0,1], [0,1], 'k--')
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=22)
plt.show()
#ROC for Decision Tree (Gini)
fpr_0, tpr_0, th_0 = roc_curve(y_test, y_score[:,1])
roc_auc_0 = auc(fpr_0, tpr_0)
#Entropy
clf_dt = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42)
clf_dt = clf_dt.fit(X_train, y_train)
path = clf_dt.cost_complexity_pruning_path(X_train, y_train)
ccp_alphas = path.ccp_alphas
ccp_alphas = ccp_alphas[:-1]
clf_dts=[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='entropy',random_state=0, ccp_alpha=ccp_alpha)
clf_dt.fit(X_train, y_train)
clf_dts.append(clf_dt)
train_scores = [clf_dt.score(X_train, y_train) for clf_dt in clf_dts]
test_scores = [clf_dt.score(X_test, y_test) for clf_dt in clf_dts]
alpha_loop_values =[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=0, ccp_alpha=ccp_alpha)
scores= cross_val_score(clf_dt,X_train,y_train, cv=10)
alpha_loop_values.append([ccp_alpha,np.mean(scores), np.std(scores)])
alpha_results = pd.DataFrame(alpha_loop_values,
columns=['alpha','mean_accuracy','std'])
indexmax = alpha_results[['mean_accuracy']].idxmax()
maxalpha=alpha_results.loc[indexmax,'alpha']
ideal_ccp_alpha = float(maxalpha)
print(ideal_ccp_alpha)
clf_dt_pruned = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42, ccp_alpha=ideal_ccp_alpha)
clf_dt_pruned = clf_dt_pruned.fit(X_train, y_train)
y_score = clf_dt_pruned.predict_proba(X_test)
fpr_en, tpr_en, th_en = roc_curve(y_test, y_score[:,1])
roc_auc_en = auc(fpr_en, tpr_en)
y_pred = clf_dt_pruned.predict(X_test)
print(classification_report(y_test, y_pred))
#KNN, find best score
acc = []
# Will take some time
for i in range(1,40):
neigh = KNeighborsClassifier(n_neighbors = i).fit(X_train,y_train)
yhat = neigh.predict(X_test)
acc.append(metrics.accuracy_score(y_test, yhat))
best_k = acc.index(max(acc)) + 1  # acc[i] holds the score for k = i + 1
clf_knn = KNeighborsClassifier(n_neighbors=best_k)
clf_knn.fit(X_train, y_train)  # fit on the training split only, avoiding test-set leakage
y_score = clf_knn.predict_proba(X_test)
fpr_KNN, tpr_KNN, th_KNN = roc_curve(y_test, y_score[:,1])
roc_auc_KNN = auc(fpr_KNN, tpr_KNN)
y_pred = clf_knn.predict(X_test)
print(classification_report(y_test, y_pred))
# Instantiate model with 380 decision trees
model = RandomForestClassifier(n_estimators = 380, random_state = 42)
# Train the model on training data
ra=model.fit(X_train, y_train)
y_score = model.predict_proba(X_test)
fpr_RF, tpr_RF, th_RF = roc_curve(y_test, y_score[:,1])
roc_auc_RF = auc(fpr_RF, tpr_RF)
y_pred = model.predict(X_test)
print(classification_report(y_test, y_pred))
plt.figure(figsize=(8,5))
plt.plot(fpr_0, tpr_0,lw=3,label='$GINI_{AUC}$ = %.3f' % (roc_auc_0))
plt.plot(fpr_en, tpr_en,lw=3,label='$ENT_{AUC}$ = %.3f' % (roc_auc_en))
plt.plot(fpr_KNN, tpr_KNN,lw=3,label='$KNN_{AUC}$ = %.3f' % (roc_auc_KNN))
plt.plot(fpr_RF, tpr_RF,lw=3,label='$RAF_{AUC}$ = %.3f' % (roc_auc_RF))
#plt.plot(fpr_c, tpr_c,lw=3,label='$GR_{AUC}$ = %.3f' % (roc_auc_c))
plt.legend(loc="lower right", fontsize=18, frameon=False)
plt.plot([0,1], [0,1], 'k--')
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=22)
plt.show()
"""### Age of Aquisition (binary)"""
refvar="aoa"
taglio=0.6
X=df_class_ref.drop(refvar,axis=1).copy()
y=df_class_ref[refvar].copy()
y_up_index = y >= taglio
y[y_up_index]=1
y_zero_index = y < taglio
y[y_zero_index]=0
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
clf_dt = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42)
clf_dt = clf_dt.fit(X_train, y_train)
path = clf_dt.cost_complexity_pruning_path(X_train, y_train)
ccp_alphas = path.ccp_alphas
ccp_alphas = ccp_alphas[:-1]
clf_dts=[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(random_state=0, ccp_alpha=ccp_alpha)
clf_dt.fit(X_train, y_train)
clf_dts.append(clf_dt)
train_scores = [clf_dt.score(X_train, y_train) for clf_dt in clf_dts]
test_scores = [clf_dt.score(X_test, y_test) for clf_dt in clf_dts]
fig, ax =plt.subplots()
ax.set_xlabel("alpha")
ax.set_ylabel("accuracy")
ax.set_title("Accuracy vs alpha for training and testing sets")
ax.plot(ccp_alphas,train_scores, marker ='o',label='train',drawstyle='steps-post')
ax.plot(ccp_alphas,test_scores, marker ='o',label='test',drawstyle='steps-post')
ax.legend()
plt.show()
alpha_loop_values =[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=0, ccp_alpha=ccp_alpha)
scores= cross_val_score(clf_dt,X_train,y_train, cv=10)
alpha_loop_values.append([ccp_alpha,np.mean(scores), np.std(scores)])
alpha_results = pd.DataFrame(alpha_loop_values,
columns=['alpha','mean_accuracy','std'])
alpha_results.plot(x='alpha',
y='mean_accuracy',
marker='o',
linestyle='--')
indexmax = alpha_results[['mean_accuracy']].idxmax()
maxalpha=alpha_results.loc[indexmax,'alpha']
ideal_ccp_alpha = float(maxalpha)
clf_dt_pruned = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42, ccp_alpha=ideal_ccp_alpha)
clf_dt_pruned = clf_dt_pruned.fit(X_train, y_train)
plot_confusion_matrix(clf_dt_pruned,
X_test,
y_test,
display_labels=['younger','older'])
plt.figure(figsize=(15,7.5))
from sklearn.tree import plot_tree
plot_tree(clf_dt_pruned,
filled=True,
rounded=True,
class_names=["younger","older"],
feature_names=X.columns)
y_pred = clf_dt_pruned.predict(X_test)  # evaluate on the held-out test split
print('Accuracy %s' % accuracy_score(y_test, y_pred))
print('F1-score %s' % f1_score(y_test, y_pred))
print(classification_report(y_test, y_pred))
y_score = clf_dt_pruned.predict_proba(X_test)
fpr, tpr, th = roc_curve(y_test, y_score[:,1])
roc_auc = auc(fpr, tpr)
print(roc_auc)
plt.figure(figsize=(8,5))
plt.plot(fpr, tpr, label='$AUC$ = %.3f' % (roc_auc))
plt.legend(loc="lower right", fontsize=14, frameon=False)
plt.plot([0,1], [0,1], 'k--')
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=22)
plt.show()
#ROC for Decision Tree (Gini)
fpr_0, tpr_0, th_0 = roc_curve(y_test, y_score[:,1])
roc_auc_0 = auc(fpr_0, tpr_0)
#Entropy
clf_dt = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42)
clf_dt = clf_dt.fit(X_train, y_train)
path = clf_dt.cost_complexity_pruning_path(X_train, y_train)
ccp_alphas = path.ccp_alphas
ccp_alphas = ccp_alphas[:-1]
clf_dts=[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='entropy',random_state=0, ccp_alpha=ccp_alpha)
clf_dt.fit(X_train, y_train)
clf_dts.append(clf_dt)
train_scores = [clf_dt.score(X_train, y_train) for clf_dt in clf_dts]
test_scores = [clf_dt.score(X_test, y_test) for clf_dt in clf_dts]
alpha_loop_values =[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=0, ccp_alpha=ccp_alpha)
scores= cross_val_score(clf_dt,X_train,y_train, cv=10)
alpha_loop_values.append([ccp_alpha,np.mean(scores), np.std(scores)])
alpha_results = pd.DataFrame(alpha_loop_values,
columns=['alpha','mean_accuracy','std'])
indexmax = alpha_results[['mean_accuracy']].idxmax()
maxalpha=alpha_results.loc[indexmax,'alpha']
ideal_ccp_alpha = float(maxalpha)
print(ideal_ccp_alpha)
clf_dt_pruned = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42, ccp_alpha=ideal_ccp_alpha)
clf_dt_pruned = clf_dt_pruned.fit(X_train, y_train)
y_score = clf_dt_pruned.predict_proba(X_test)
fpr_en, tpr_en, th_en = roc_curve(y_test, y_score[:,1])
roc_auc_en = auc(fpr_en, tpr_en)
y_pred = clf_dt_pruned.predict(X_test)
print(classification_report(y_test, y_pred))
#KNN, find best score
acc = []
# Will take some time
for i in range(1,40):
neigh = KNeighborsClassifier(n_neighbors = i).fit(X_train,y_train)
yhat = neigh.predict(X_test)
acc.append(metrics.accuracy_score(y_test, yhat))
best_k = acc.index(max(acc)) + 1  # acc[i] holds the score for k = i + 1
clf_knn = KNeighborsClassifier(n_neighbors=best_k)
clf_knn.fit(X_train, y_train)  # fit on the training split only, avoiding test-set leakage
y_score = clf_knn.predict_proba(X_test)
fpr_KNN, tpr_KNN, th_KNN = roc_curve(y_test, y_score[:,1])
roc_auc_KNN = auc(fpr_KNN, tpr_KNN)
y_pred = clf_knn.predict(X_test)
print(classification_report(y_test, y_pred))
# Instantiate model with 380 decision trees
model = RandomForestClassifier(n_estimators = 380, random_state = 42)
# Train the model on training data
ra=model.fit(X_train, y_train)
y_score = model.predict_proba(X_test)
fpr_RF, tpr_RF, th_RF = roc_curve(y_test, y_score[:,1])
roc_auc_RF = auc(fpr_RF, tpr_RF)
y_pred = model.predict(X_test)
print(classification_report(y_test, y_pred))
plt.figure(figsize=(8,5))
plt.plot(fpr_0, tpr_0,lw=3,label='$GINI_{AUC}$ = %.3f' % (roc_auc_0))
plt.plot(fpr_en, tpr_en,lw=3,label='$ENT_{AUC}$ = %.3f' % (roc_auc_en))
plt.plot(fpr_KNN, tpr_KNN,lw=3,label='$KNN_{AUC}$ = %.3f' % (roc_auc_KNN))
plt.plot(fpr_RF, tpr_RF,lw=3,label='$RAF_{AUC}$ = %.3f' % (roc_auc_RF))
#plt.plot(fpr_c, tpr_c,lw=3,label='$GR_{AUC}$ = %.3f' % (roc_auc_c))
plt.legend(loc="lower right", fontsize=18, frameon=False)
plt.plot([0,1], [0,1], 'k--')
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=22)
plt.show()
"""### Web Corpus Frequency"""
refvar="web_corpus_freq"
X=df_class_ref.drop(refvar,axis=1).copy()
y=df_class_ref[refvar].copy()
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
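# web_corpus_freq is kept as a multi-class target here (the labels 4-9 used
# below suggest binned log frequencies), so no threshold binarization is
# applied, unlike the sections above.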
clf_dt = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42)
clf_dt = clf_dt.fit(X_train, y_train)
path = clf_dt.cost_complexity_pruning_path(X_train, y_train)
ccp_alphas = path.ccp_alphas
ccp_alphas = ccp_alphas[:-1]
clf_dts=[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(random_state=0, ccp_alpha=ccp_alpha)
clf_dt.fit(X_train, y_train)
clf_dts.append(clf_dt)
train_scores = [clf_dt.score(X_train, y_train) for clf_dt in clf_dts]
test_scores = [clf_dt.score(X_test, y_test) for clf_dt in clf_dts]
fig, ax =plt.subplots()
ax.set_xlabel("alpha")
ax.set_ylabel("accuracy")
ax.set_title("Accuracy vs alpha for training and testing sets")
ax.plot(ccp_alphas,train_scores, marker ='o',label='train',drawstyle='steps-post')
ax.plot(ccp_alphas,test_scores, marker ='o',label='test',drawstyle='steps-post')
ax.legend()
plt.show()
alpha_loop_values =[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=0, ccp_alpha=ccp_alpha)
scores= cross_val_score(clf_dt,X_train,y_train, cv=10)
alpha_loop_values.append([ccp_alpha,np.mean(scores), np.std(scores)])
alpha_results = pd.DataFrame(alpha_loop_values,
columns=['alpha','mean_accuracy','std'])
alpha_results.plot(x='alpha',
y='mean_accuracy',
yerr='std',
marker='o',
linestyle='--')
indexmax = alpha_results[['mean_accuracy']].idxmax()
maxalpha=alpha_results.loc[indexmax,'alpha']
ideal_ccp_alpha = float(maxalpha)
clf_dt_pruned = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42, ccp_alpha=ideal_ccp_alpha)
clf_dt_pruned = clf_dt_pruned.fit(X_train, y_train)
plot_confusion_matrix(clf_dt_pruned,
X_test,
y_test,
display_labels=['4','5','6','7','8','9'],
)
plt.figure(figsize=(15,7.5))
from sklearn.tree import plot_tree
plot_tree(clf_dt_pruned,
filled=True,
rounded=True,
class_names=['4','5','6','7','8','9'],
feature_names=X.columns)
y_pred = clf_dt_pruned.predict(X_test)  # evaluate on the held-out test split
print('Accuracy %s' % accuracy_score(y_test, y_pred))
print('F1-score %s' % f1_score(y_test, y_pred,average=None))
print(classification_report(y_test, y_pred))
print(clf_dt_pruned.predict_proba(X_test))
report = classification_report(y_test, y_pred, output_dict=True)
export = pd.DataFrame(report).transpose()
print(export.to_latex())
"""## 3.2 Classification by KNN
### Age of Acquisition
#### choice of k
"""
refvar="aoa"
taglio=0.6
X=df_class_ref.drop(refvar,axis=1).copy()
y=df_class_ref[refvar].copy()
y_up_index = y >= taglio
y[y_up_index]=1
y_zero_index = y < taglio
y[y_zero_index]=0
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# Quick baseline with an arbitrary k = 4 before scanning k values below
k = 4
neigh = KNeighborsClassifier(n_neighbors = k).fit(X_train,y_train)
Pred_y = neigh.predict(X_test)  # baseline predictions (not used further)
error_rate = []
for i in range(1,100):
knn = KNeighborsClassifier(n_neighbors=i)
knn.fit(X_train,y_train)
pred_i = knn.predict(X_test)
error_rate.append(np.mean(pred_i != y_test))
plt.figure(figsize=(10,6))
plt.plot(range(1,100),error_rate,color='blue', linestyle='dashed',
marker='o',markerfacecolor='red', markersize=10)
plt.title('Error Rate vs. K Value')
plt.xlabel('K')
plt.ylabel('Error Rate')
print("Minimum error:-",min(error_rate),"at K =",error_rate.index(min(error_rate)))
acc = []
# Will take some time
from sklearn import metrics
for i in range(1,40):
neigh = KNeighborsClassifier(n_neighbors = i).fit(X_train,y_train)
yhat = neigh.predict(X_test)
    acc.append(metrics.f1_score(y_test, yhat))
plt.figure(figsize=(10,6))
plt.plot(range(1,40),acc,color = 'blue',linestyle='dashed',
         marker='o',markerfacecolor='red', markersize=10)
plt.title('F1 score vs. K Value')  # the loop above scores F1, not accuracy
plt.xlabel('K')
plt.ylabel('F1 score')
print("Maximum F1:", max(acc), "at K =", acc.index(max(acc)) + 1)
refvar="aoa"
taglio=0.6
X=df_class_ref.drop(refvar,axis=1).copy()
y=df_class_ref[refvar].copy()
y_up_index = y >= taglio
y[y_up_index]=1
y_zero_index = y < taglio
y[y_zero_index]=0
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
best_k = error_rate.index(min(error_rate)) + 1  # error_rate[i] holds the error for k = i + 1
clf_knn = KNeighborsClassifier(n_neighbors=best_k)
clf_knn.fit(X_train, y_train)  # fit on the training split only, avoiding test-set leakage
# apply KNN to train set
y_pred = clf_knn.predict(X_train)
print(y_pred[:5])          # first few training predictions
print(y_train.values[:5])  # corresponding ground truth
print('Accuracy', accuracy_score(y_train, y_pred))
print('F1', f1_score(y_train, y_pred, average='weighted'))
print( classification_report(y_train, y_pred) )
# Confusion matrix for the training set
# scikit-learn layout for binary labels [0, 1]: [[TN, FP], [FN, TP]]
confusion_matrix(y_train, y_pred)
# apply KNN to test set
y_pred = clf_knn.predict(X_test)
print('Accuracy %s' % accuracy_score(y_test, y_pred))
print('F1-score %s' % f1_score(y_test, y_pred, average='weighted'))
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
plot_confusion_matrix(clf_knn,
X_test,
y_test,
display_labels=['younger','older'])
y_score = clf_knn.predict_proba(X_test)
fpr, tpr, th = roc_curve(y_test, y_score[:,1])
roc_auc = auc(fpr, tpr)
print(roc_auc)
plt.figure(figsize=(8,5))
plt.plot(fpr, tpr, label='$AUC$ = %.3f' % (roc_auc))
plt.legend(loc="lower right", fontsize=14, frameon=False)
plt.plot([0,1], [0,1], 'k--')
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=22)
plt.show()
"""### Valence
#### choice of k
"""
from sklearn.neighbors import KNeighborsClassifier
refvar="valence"
taglio=0.67
X=df_class_ref.drop(refvar,axis=1).copy()
y=df_class_ref[refvar].copy()
y_up_index = y >= taglio
y[y_up_index]=1
y_zero_index = y < taglio
y[y_zero_index]=0
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# Quick baseline with an arbitrary k = 4 before scanning k values below
k = 4
neigh = KNeighborsClassifier(n_neighbors = k).fit(X_train,y_train)
Pred_y = neigh.predict(X_test)  # baseline predictions (not used further)
error_rate = []
for i in range(1,100):
knn = KNeighborsClassifier(n_neighbors=i)
knn.fit(X_train,y_train)
pred_i = knn.predict(X_test)
error_rate.append(np.mean(pred_i != y_test))
plt.figure(figsize=(10,6))
plt.plot(range(1,100),error_rate,color='blue', linestyle='dashed',
marker='o',markerfacecolor='red', markersize=10)
plt.title('Error Rate vs. K Value')
plt.xlabel('K')
plt.ylabel('Error Rate')
print("Minimum error:-",min(error_rate),"at K =",error_rate.index(min(error_rate)))
acc = []
# Will take some time
from sklearn import metrics
for i in range(1,40):
neigh = KNeighborsClassifier(n_neighbors = i).fit(X_train,y_train)
yhat = neigh.predict(X_test)
acc.append(metrics.accuracy_score(y_test, yhat))
plt.figure(figsize=(10,6))
plt.plot(range(1,40),acc,color = 'blue',linestyle='dashed',
marker='o',markerfacecolor='red', markersize=10)
plt.title('Accuracy vs. K Value')
plt.xlabel('K')
plt.ylabel('Accuracy')
print("Maximum accuracy:", max(acc), "at K =", acc.index(max(acc)) + 1)
refvar="valence"
taglio=0.67
X=df_class_ref.drop(refvar,axis=1).copy()
y=df_class_ref[refvar].copy()
y_up_index = y >= taglio
y[y_up_index]=1
y_zero_index = y < taglio
y[y_zero_index]=0
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
best_k = error_rate.index(min(error_rate)) + 1  # error_rate[i] holds the error for k = i + 1
clf_knn = KNeighborsClassifier(n_neighbors=best_k)
clf_knn.fit(X_train, y_train)  # fit on the training split only, avoiding test-set leakage
y_pred = clf_knn.predict(X_train)
print('Accuracy', accuracy_score(y_train, y_pred))
print('F1', f1_score(y_train, y_pred, average='weighted'))
print( classification_report(y_train, y_pred) )
confusion_matrix(y_train, y_pred)
y_pred = clf_knn.predict(X_test)
print('Accuracy %s' % accuracy_score(y_test, y_pred))
print('F1-score %s' % f1_score(y_test, y_pred, average='weighted'))
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
plot_confusion_matrix(clf_knn,
X_test,
y_test,
display_labels=['not val','val'])
y_score = clf_knn.predict_proba(X_test)
fpr, tpr, th = roc_curve(y_test, y_score[:,1])
roc_auc = auc(fpr, tpr)
print(roc_auc)
plt.figure(figsize=(8,5))
plt.plot(fpr, tpr, label='$AUC$ = %.3f' % (roc_auc))
plt.legend(loc="lower right", fontsize=14, frameon=False)
plt.plot([0,1], [0,1], 'k--')
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=22)
plt.show()
"""### Polysemy
#### choice of k
"""
from sklearn.neighbors import KNeighborsClassifier
refvar="polysemy"
taglio=0.6
X=df_class_ref.drop(refvar,axis=1).copy()
y=df_class_ref[refvar].copy()
y_up_index = y >= taglio
y[y_up_index]=1
y_zero_index = y < taglio
y[y_zero_index]=0
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# Quick baseline with an arbitrary k = 4 before scanning k values below
k = 4
neigh = KNeighborsClassifier(n_neighbors = k).fit(X_train,y_train)
Pred_y = neigh.predict(X_test)  # baseline predictions (not used further)
error_rate = []
for i in range(1,100):
knn = KNeighborsClassifier(n_neighbors=i)
knn.fit(X_train,y_train)
pred_i = knn.predict(X_test)
error_rate.append(np.mean(pred_i != y_test))
plt.figure(figsize=(10,6))
plt.plot(range(1,100),error_rate,color='blue', linestyle='dashed',
marker='o',markerfacecolor='red', markersize=10)
plt.title('Error Rate vs. K Value')
plt.xlabel('K')
plt.ylabel('Error Rate')
print("Minimum error:-",min(error_rate),"at K =",error_rate.index(min(error_rate)))
acc = []
# Will take some time
from sklearn import metrics
for i in range(1,40):
neigh = KNeighborsClassifier(n_neighbors = i).fit(X_train,y_train)
yhat = neigh.predict(X_test)
    acc.append(metrics.average_precision_score(y_test, yhat))
plt.figure(figsize=(10,6))
plt.plot(range(1,40),acc,color = 'blue',linestyle='dashed',
         marker='o',markerfacecolor='red', markersize=10)
plt.title('Average precision vs. K Value')  # the loop above scores average precision, not F1
plt.xlabel('K')
plt.ylabel('Average precision')
print("Maximum average precision:", max(acc), "at K =", acc.index(max(acc)) + 1)
refvar="polysemy"
taglio=0.67  # NOTE: differs from the 0.6 cut-off used above when scanning k
X=df_class_ref.drop(refvar,axis=1).copy()
y=df_class_ref[refvar].copy()
y_up_index = y >= taglio
y[y_up_index]=1
y_zero_index = y < taglio
y[y_zero_index]=0
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
best_k = acc.index(max(acc)) + 1  # acc[i] holds the score for k = i + 1
clf_knn = KNeighborsClassifier(n_neighbors=best_k)
clf_knn.fit(X_train, y_train)  # fit on the training split only, avoiding test-set leakage
y_pred = clf_knn.predict(X_train)
print('Accuracy', accuracy_score(y_train, y_pred))
print('F1', f1_score(y_train, y_pred, average='weighted'))
print( classification_report(y_train, y_pred) )
confusion_matrix(y_train, y_pred)
y_pred = clf_knn.predict(X_test)
print('Accuracy %s' % accuracy_score(y_test, y_pred))
print('F1-score %s' % f1_score(y_test, y_pred, average='weighted'))
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
plot_confusion_matrix(clf_knn,
X_test,
y_test,
display_labels=['not polysemic','polysemic'])
y_score = clf_knn.predict_proba(X_test)
fpr, tpr, th = roc_curve(y_test, y_score[:,1])
roc_auc = auc(fpr, tpr)
print(roc_auc)
plt.figure(figsize=(8,5))
plt.plot(fpr, tpr, label='$AUC$ = %.3f' % (roc_auc))
plt.legend(loc="lower right", fontsize=14, frameon=False)
plt.plot([0,1], [0,1], 'k--')
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=22)
plt.show()
"""## Random Forest
### Valence
"""
refvar='valence'
taglio=0.67
X=df_class_ref.drop(refvar,axis=1).copy()
y=df_class_ref[refvar].copy()
y_up_index = y >= taglio
y[y_up_index]=1
y_zero_index = y < taglio
y[y_zero_index]=0
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# Instantiate model with 380 decision trees
model = RandomForestClassifier(n_estimators = 380, random_state = 42)
# Train the model on training data
ra=model.fit(X_train, y_train)
# Evaluate the model on the held-out test data
prediction_test = model.predict(X_test)
print("Accuracy =", metrics.accuracy_score(y_test, prediction_test))
# Accuracy generally improves with more training data; varying test_size
# in train_test_split would show this.
# Random forests also expose impurity-based feature importances;
# print them sorted in descending order
feature_list = list(X.columns)
feature_imp = pd.Series(model.feature_importances_,index=feature_list).sort_values(ascending=False)
print(feature_imp)
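# Impurity-based importances (above) can favor high-cardinality features. A hedged
# alternative sketch using permutation importance on the held-out split, with the
# forest fitted above:
from sklearn.inspection import permutation_importance
perm = permutation_importance(model, X_test, y_test, n_repeats=10, random_state=42)
perm_imp = pd.Series(perm.importances_mean, index=X.columns).sort_values(ascending=False)
print(perm_imp)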
y_pred = model.predict(X_test) # predictions on the held-out test split, used by the metrics below
plot_confusion_matrix(ra,
X_test,
y_test,
display_labels=['not val','val'],
)
y_score = model.predict_proba(X_test)
fpr, tpr, th = roc_curve(y_test, y_score[:,1])
roc_auc = auc(fpr, tpr)
print(roc_auc)
plt.figure(figsize=(8,5))
plt.plot(fpr, tpr, label='$AUC$ = %.3f' % (roc_auc))
plt.legend(loc="lower right", fontsize=14, frameon=False)
plt.plot([0,1], [0,1], 'k--')
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=22)
plt.show()
print('Accuracy %s' % accuracy_score(y_test, y_pred))
print('F1-score %s' % f1_score(y_test, y_pred,average='weighted'))
print(classification_report(y_test, y_pred))
"""### Polysemy"""
refvar='polysemy'
taglio=0.67
X=df_class_ref.drop(refvar,axis=1).copy()
y=df_class_ref[refvar].copy()
y_up_index = y >= taglio
y[y_up_index]=1
y_zero_index = y < taglio
y[y_zero_index]=0
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 42)
# Instantiate model with 385 decision trees
model = RandomForestClassifier(n_estimators = 385, random_state = 42)
# Train the model on training data
ra=model.fit(X_train, y_train)
#TESTING THE MODEL BY PREDICTING ON TEST DATA
#AND CALCULATE THE ACCURACY SCORE
prediction_test = model.predict(X_test)
#print(y_test, prediction_test)
#Print the prediction accuracy
print ("Accuracy = ", metrics.accuracy_score(y_test, prediction_test))
#Test accuracy for various test sizes to see how it improves with more training data
#A useful property of random forests is that they expose per-feature importances
# Get numerical feature importances
#importances = list(model.feature_importances_)
#Print them in a readable format:
feature_list = list(X.columns)
feature_imp = pd.Series(model.feature_importances_,index=feature_list).sort_values(ascending=False)
print(feature_imp)
y_pred = model.predict(X_test) # predictions on the held-out test split, used by the metrics below
plot_confusion_matrix(ra,
X_test,
y_test,
display_labels=['not pol','pol']
)
y_score = model.predict_proba(X_test)
fpr, tpr, th = roc_curve(y_test, y_score[:,1])
roc_auc = auc(fpr, tpr)
print(roc_auc)
plt.figure(figsize=(8,5))
plt.plot(fpr, tpr, label='$AUC$ = %.3f' % (roc_auc))
plt.legend(loc="lower right", fontsize=14, frameon=False)
plt.plot([0,1], [0,1], 'k--')
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=22)
plt.show()
print('Accuracy %s' % accuracy_score(y_test, y_pred))
print('F1-score %s' % f1_score(y_test, y_pred,average='weighted'))
print(classification_report(y_test, y_pred))
"""### Age of Aquisition"""
refvar='aoa'
taglio=0.6
X=df_class_ref.drop(refvar,axis=1).copy()
y=df_class_ref[refvar].copy()
y_up_index = y >= taglio
y[y_up_index]=1
y_zero_index = y < taglio
y[y_zero_index]=0
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# Instantiate model with 380 decision trees
model = RandomForestClassifier(n_estimators = 380, random_state = 42)
# Train the model on training data
ra=model.fit(X_train, y_train)
#TESTING THE MODEL BY PREDICTING ON TEST DATA
#AND CALCULATE THE ACCURACY SCORE
prediction_test = model.predict(X_test)
#print(y_test, prediction_test)
#Print the prediction accuracy
print ("Accuracy = ", metrics.accuracy_score(y_test, prediction_test))
#Test accuracy for various test sizes to see how it improves with more training data
#A useful property of random forests is that they expose per-feature importances
# Get numerical feature importances
#importances = list(model.feature_importances_)
#Print them in a readable format:
feature_list = list(X.columns)
feature_imp = pd.Series(model.feature_importances_,index=feature_list).sort_values(ascending=False)
print(feature_imp)
y_pred = model.predict(X_test) # predictions on the held-out test split, used by the metrics below
plot_confusion_matrix(ra,
X_test,
y_test,
display_labels=['younger','older']
)
y_score = model.predict_proba(X_test)
fpr, tpr, th = roc_curve(y_test, y_score[:,1])
roc_auc = auc(fpr, tpr)
print(roc_auc)
plt.figure(figsize=(8,5))
plt.plot(fpr, tpr, label='$AUC$ = %.3f' % (roc_auc))
plt.legend(loc="lower right", fontsize=14, frameon=False)
plt.plot([0,1], [0,1], 'k--')
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=22)
plt.show()
print('Accuracy %s' % accuracy_score(y_test, y_pred))
print('F1-score %s' % f1_score(y_test, y_pred,average='weighted'))
print(classification_report(y_test, y_pred))
"""### Out of bag error"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
RANDOM_STATE = 42
refvar='valence'
taglio=0.67
X=df_class_ref.drop(refvar,axis=1).copy()
y=df_class_ref[refvar].copy()
y_up_index = y >= taglio
y[y_up_index]=1
y_zero_index = y < taglio
y[y_zero_index]=0
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelized ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
(
"RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(
warm_start=True,
oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE,
),
),
(
"RandomForestClassifier, max_features='log2'",
RandomForestClassifier(
warm_start=True,
max_features="log2",
oob_score=True,
random_state=RANDOM_STATE,
),
),
(
"RandomForestClassifier, max_features=None",
RandomForestClassifier(
warm_start=True,
max_features=None,
oob_score=True,
random_state=RANDOM_STATE,
),
),
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 100
max_estimators = 1000
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1, 5):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
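# Short follow-up sketch: read off the forest size with the lowest OOB error for
# each max_features setting explored above (error_rate maps label -> (n, error) pairs):
for label, clf_err in error_rate.items():
    best_n, best_err = min(clf_err, key=lambda pair: pair[1])
    print('%s: min OOB error %.4f at n_estimators=%d' % (label, best_err, best_n))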
"""## Entropy (Decision Tree)
### Age of Acquisition
"""
refvar="aoa"
taglio=0.6
X=df_class_ref.drop(refvar,axis=1).copy()
y=df_class_ref[refvar].copy()
y_up_index = y >= taglio
y[y_up_index]=1
y_zero_index = y < taglio
y[y_zero_index]=0
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
clf_dt = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42)
clf_dt = clf_dt.fit(X_train, y_train)
path = clf_dt.cost_complexity_pruning_path(X_train, y_train)
ccp_alphas = path.ccp_alphas
ccp_alphas = ccp_alphas[:-1] # drop the largest alpha, which would prune the tree down to its root
clf_dts=[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='entropy',random_state=0, ccp_alpha=ccp_alpha)
clf_dt.fit(X_train, y_train)
clf_dts.append(clf_dt)
train_scores = [clf_dt.score(X_train, y_train) for clf_dt in clf_dts]
test_scores = [clf_dt.score(X_test, y_test) for clf_dt in clf_dts]
fig, ax =plt.subplots()
ax.set_xlabel("alpha")
ax.set_ylabel("accuracy")
ax.set_title("Accuracy vs alpha for training and testing sets")
ax.plot(ccp_alphas,train_scores, marker ='o',label='train',drawstyle='steps-post')
ax.plot(ccp_alphas,test_scores, marker ='o',label='test',drawstyle='steps-post')
ax.legend()
plt.show()
alpha_loop_values =[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=0, ccp_alpha=ccp_alpha)
scores= cross_val_score(clf_dt,X_train,y_train, cv=10)
alpha_loop_values.append([ccp_alpha,np.mean(scores), np.std(scores)])
alpha_results = pd.DataFrame(alpha_loop_values,
columns=['alpha','mean_accuracy','std'])
alpha_results.plot(x='alpha',
y='mean_accuracy',
yerr='std',
marker='o',
linestyle='-')
alpha_results[(alpha_results['alpha']>0.002) & (alpha_results['alpha']<0.004)] # inspect candidate alphas in this range
indexmax = alpha_results['mean_accuracy'].idxmax()
ideal_ccp_alpha = float(alpha_results.loc[indexmax,'alpha'])
print(ideal_ccp_alpha)
clf_dt_pruned = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42, ccp_alpha=ideal_ccp_alpha)
clf_dt_pruned = clf_dt_pruned.fit(X_train, y_train)
plot_confusion_matrix(clf_dt_pruned,
X_test,
y_test,
display_labels=['young','old'])
plt.figure(figsize=(15,7.5))
from sklearn.tree import plot_tree
plot_tree(clf_dt_pruned,
filled=True,
rounded=True,
class_names=["young","old"],
feature_names=X.columns)
y_pred = clf_dt_pruned.predict(X_test) # evaluate the pruned tree on the held-out split
print('Accuracy %s' % accuracy_score(y_test, y_pred))
print('F1-score %s' % f1_score(y_test, y_pred, average=None))
print(classification_report(y_test, y_pred))
y_score = clf_dt_pruned.predict_proba(X_test)
fpr, tpr, th = roc_curve(y_test, y_score[:,1])
roc_auc = auc(fpr, tpr)
print(roc_auc)
plt.figure(figsize=(8,5))
plt.plot(fpr, tpr, label='$AUC$ = %.3f' % (roc_auc))
plt.legend(loc="lower right", fontsize=14, frameon=False)
plt.plot([0,1], [0,1], 'k--')
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=22)
plt.show()
"""### Polysemy"""
refvar="polysemy"
taglio=0.6
X=df_class_ref.drop(refvar,axis=1).copy()
y=df_class_ref[refvar].copy()
y_up_index = y >= taglio
y[y_up_index]=1
y_zero_index = y < taglio
y[y_zero_index]=0
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
clf_dt = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=42)
clf_dt = clf_dt.fit(X_train, y_train)
path = clf_dt.cost_complexity_pruning_path(X_train, y_train)
ccp_alphas = path.ccp_alphas
ccp_alphas = ccp_alphas[:-1] # drop the largest alpha, which would prune the tree down to its root
clf_dts=[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='entropy',random_state=0, ccp_alpha=ccp_alpha)
clf_dt.fit(X_train, y_train)
clf_dts.append(clf_dt)
train_scores = [clf_dt.score(X_train, y_train) for clf_dt in clf_dts]
test_scores = [clf_dt.score(X_test, y_test) for clf_dt in clf_dts]
fig, ax =plt.subplots()
ax.set_xlabel("alpha")
ax.set_ylabel("accuracy")
ax.set_title("Accuracy vs alpha for training and testing sets")
ax.plot(ccp_alphas,train_scores, marker ='o',label='train',drawstyle='steps-post')
ax.plot(ccp_alphas,test_scores, marker ='o',label='test',drawstyle='steps-post')
ax.legend()
plt.show()
alpha_loop_values =[]
for ccp_alpha in ccp_alphas:
clf_dt = DecisionTreeClassifier(criterion='entropy', max_depth=None, min_samples_split=2, min_samples_leaf=1,random_state=0, ccp_alpha=ccp_alpha)
scores= cross_val_score(clf_dt,X_train,y_train, cv=10)
alpha_loop_values.append([ccp_alpha,np.mean(scores), np.std(scores)])
#!/usr/bin/env python
import rospy
import numpy as np
import sys
import os
import tf
from dbw_mkz_msgs.msg import SteeringReport
from sensor_msgs.msg import Image
from derived_object_msgs.msg import ObjectWithCovarianceArray
from std_msgs.msg import String
from sensor_msgs.msg import Imu
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Twist
from geometry_msgs.msg import TwistStamped
from nav_msgs.msg import Odometry
from std_msgs.msg import Header
from darknet_ros_msgs.msg import BoundingBoxes
from darknet_ros_msgs.msg import ObjectCount
from nuscenes2bag.msg import RadarObjects
from utils import Mat_buildROS
from utils import Mat_extractROS
from flir_adk_multi.msg import trackArrayRdr
from flir_adk_multi.msg import trackRdr
from flir_adk_multi.msg import trackArrayCam
from flir_adk_multi.msg import trackCam
from sensor_msgs.msg import PointCloud2
from dbw_mkz_msgs.msg import WheelSpeedReport
from utils import CamObj
from utils import RadarObj
from utils import RadarObjMKZ
from std_msgs.msg import Float32MultiArray
from std_msgs.msg import MultiArrayDimension
from cv_bridge import CvBridge, CvBridgeError
import cv2
from itertools import permutations
import time
def main():
rospy.init_node('jpda', anonymous=True)
DataSetType=sys.argv[1]
Method=sys.argv[2]
PlotArg=sys.argv[3] # 0-No Plot; 1-Combined; 2-Cam; 3-Rdr; 4-Both Cam&Rdr
fusInst=jpda_class(DataSetType,Method,PlotArg)
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
class jpda_class():
def __init__(self,DataSetType,Method,PlotArg):
self.TrackPubRdr=rospy.Publisher("dataAssocRdr",trackArrayRdr, queue_size=2)
self.TrackPubCam=rospy.Publisher("dataAssocCam",trackArrayCam, queue_size=2)
self.image_pub=rospy.Publisher("fusedImage",Image, queue_size=2)
filePathPrefix=str("/home/vamsi/Tracking/py-motmetrics/motmetrics/res_dir/")
self.DestF=open((filePathPrefix+'seq1'+'.txt'),"w")
# self.YoloClassList=[0,1,2,3,5,7] # For NuSc
# self.YoloClassList=[0,1,2] # For Yolov3_flir
self.YoloClassList=[2] # For Matlab Sim Data
self.GateThreshRdr =1# Scaling factor, threshold for gating
self.GateThreshCam=10# TODO: adjust?
self.trackInitRdrThresh=0.5 # For track initiation
self.trackInitCamThresh=20 # Radius of 20 pixels allowed for track initiation
self.CombGateThresh=10# in pixels (added to radius buffer)
self.bridge=CvBridge()
self.font=cv2.FONT_HERSHEY_SIMPLEX
# Initializing parameters:
self.Q_rdr=np.array([[10,0,0,0],[0,10,0,0],[0,0,5,0],[0,0,0,1]])
self.R_rdr=np.array([[3,0,0],[0,3,0],[0,0,3]])
# self.Q_cam=np.diag([10,10,15,15,10,10,15,15])
self.Q_cam=np.diag([5,5,10,10,10,10,20,20])
self.R_cam=np.array([[5,0,0,0],[0,5,0,0],[0,0,5,0],[0,0,0,5]])
self.CamMsrtmMatrixH=np.array([[1,0,0,0,0,0,0,0],[0,1,0,0,0,0,0,0],\
[0,0,1,0,0,0,0,0],[0,0,0,1,0,0,0,0]]) # Only positions and width/height are measured
self.Vt=0.0
self.velY=0.0
self.velX=0.0
self.psi=0.0
self.psiD=0.0# psiDot
self.Method=Method
self.PlotArg=PlotArg
self.HorzOffset=0# For translation from radar to cam coordinates, manual offset
self.CamXOffset=2.36#=93 inches, measured b/w cam and Rdr, in x direction
self.CamZoffset=1 # Roughly 40 inches
self.imageTime=Header()
self.BBoxStore=BoundingBoxes()
#Params for writing tracks to TXT:
self.delta_x = 0
self.delta_y = 0 # Assuming that the radar and camera are on same centerline
self.delta_z = 1.0414/2
self.H_FOV=190
self.V_FOV=41 #Calculated based on aspect ratio
self.HorzOffsetTXT=0 # Manual horizontal (Y-direction) offset for radar in pixels
self.VertOffsetTXT=-30 # Manual vertical (Z-direction) offset for radar in pixels
self.ImageExists=0
self.BBheight=90
self.BBWidth=90 # For now, static
self.FrameInit=1
self.UseCamTracksOnly=1 #1 if using only camera tracks, 0 if using combined tracks for eval
if DataSetType=="NuSc":
rospy.Subscriber('/cam_front/raw', Image, self.buildImage)
rospy.Subscriber('/vel', Twist, self.Odom1NuSc)
rospy.Subscriber('/odom', Odometry, self.Odom2NuSc)
rospy.Subscriber('/imu', Imu, self.Odom3NuSc)
rospy.Subscriber('/radar_front', BoundingBoxes, self.RdrMsrmtsNuSc)
elif DataSetType=="MKZ":
self.CamFOV=190.0
rospy.Subscriber('/Thermal_Panorama', Image, self.buildImage)
rospy.Subscriber('/imu/data', Imu, self.Odom2MKZ) # TODO: fix after IMU is available
rospy.Subscriber('/vehicle/twist', TwistStamped,self.Odom3MKZ)
rospy.Subscriber('/darknet_ros/bounding_boxes', BoundingBoxes,self.BBoxBuilder)
rospy.Subscriber('/os_cloud_node/points', PointCloud2,self.writeToFile) #Only write to file everytime a new lidar PCL is published
rate=rospy.Rate(10) # 10 Hz
while not rospy.is_shutdown():
# CycleStartTime=time.time()
# startTime=time.time()
# rospy.Subscriber('/as_tx/objects', ObjectWithCovarianceArray,self.RdrMsrmtsMKZ)
# rospy.Subscriber('/darknet_ros/found_object', ObjectCount,self.CamMsrmts)
self.RdrMsrmtsMKZ(rospy.wait_for_message('/as_tx/objects', ObjectWithCovarianceArray))
self.CamMsrmts(rospy.wait_for_message('/darknet_ros/found_object', ObjectCount))
# # print('TOTAL for RDR:' + str(time.time()-startTime))
# # startTime=time.time()
# try:
# rospy.Subscriber('/darknet_ros/found_object', ObjectCount,self.CamMsrmts)
# except:
# rospy.loginfo('No Camera Data/Bounding Boxes found')
# pass
# print('TOTAL for CAM:' + str(time.time()-startTime))
# startTimeCom=time.time()
# print('Time Combining:' + str(time.time()-startTimeCom))
# print('Total Cycle Time:' + str(time.time()-CycleStartTime))
self.CamRdrCombine()
rate.sleep()
elif DataSetType=="matlab":
self.CamFOV=50
rospy.Subscriber('/Thermal_Panorama', Image, self.buildImage)
rospy.Subscriber('/imu/data', Imu, self.Odom2MKZ) # TODO: fix after IMU is available
rospy.Subscriber('/vehicle/twist', TwistStamped,self.Odom3MKZ)
rospy.Subscriber('/darknet_ros/bounding_boxes', BoundingBoxes,self.BBoxBuilder)
rate=rospy.Rate(10) # 10 Hz
while not rospy.is_shutdown():
# CycleStartTime=time.time()
# startTime=time.time()
# rospy.Subscriber('/as_tx/objects', ObjectWithCovarianceArray,self.RdrMsrmtsMKZ)
# rospy.Subscriber('/darknet_ros/found_object', ObjectCount,self.CamMsrmts)
self.RdrMsrmtsMKZ(rospy.wait_for_message('/as_tx/objects', ObjectWithCovarianceArray))
self.CamMsrmts(rospy.wait_for_message('/darknet_ros/found_object', ObjectCount))
# # print('TOTAL for RDR:' + str(time.time()-startTime))
# # startTime=time.time()
# try:
# rospy.Subscriber('/darknet_ros/found_object', ObjectCount,self.CamMsrmts)
# except:
# rospy.loginfo('No Camera Data/Bounding Boxes found')
# pass
# print('TOTAL for CAM:' + str(time.time()-startTime))
# startTimeCom=time.time()
# print('Time Combining:' + str(time.time()-startTimeCom))
# print('Total Cycle Time:' + str(time.time()-CycleStartTime))
self.CamRdrCombine()
rate.sleep()
def buildImage(self,data):
if not(hasattr(self,'image')):
self.image=[]
self.image=self.bridge.imgmsg_to_cv2(data, "rgb8")
self.imageTime=data.header
self.ImageExists=1
def Odom1NuSc(self,data):
self.Vt =data.linear.x
def Odom2NuSc(self,data):
self.psi=data.pose.pose.orientation.z
def Odom3NuSc(self,data):
self.psiD=data.angular_velocity.z
def Odom1MKZ(self,data): # REMOVE
self.Vt=data.speed
def Odom2MKZ(self,data):
self.psi=tf.transformations.euler_from_quaternion([data.orientation.x,data.orientation.y,data.orientation.z,data.orientation.w])[2]
# psi above is in radians, with 0 facing due EAST, not north
def Odom3MKZ(self,data):
self.psiD=data.twist.angular.z
self.Vt=data.twist.linear.x
self.velX=self.Vt # For use in calculating velocity of cut in vehicle(tracking target), Vc
def writeToFile(self,data):
# print('Writing ToFile')
if not hasattr(self,'CombinedTracks'):
return
# self.Readoings=[]
# n=len(self.RadarTracks)
RadarAnglesH=0.0
RadarAnglesV=0.0
frame=self.FrameInit
self.FrameInit+=1
if self.UseCamTracksOnly==1:
writeTracks=self.CurrentCamTracks
else:
writeTracks=self.CombinedTracks
for idx in range(len(writeTracks.tracks)):
# if (data.objects[idx].pose.pose.position.x==0.0) and (data.objects[idx].pose.pose.position.y==0.0) and (data.objects[idx].pose.covariance[0]==0.0):
# continue #Zero entry, so skip it
# else: #write to file
# <frame>, <id>, <bb_left>, <bb_top>, <bb_width>, <bb_height>, <conf>, <x>, <y>, <z>
id=int(idx+1) # TODO: This is temp, not true ID of car
# RadarX=data.objects[idx].pose.pose.position.x+self.delta_x
# RadarY=data.objects[idx].pose.pose.position.y
# RadarZ=0.0+self.delta_z
# RadarAnglesH=-np.degrees(np.arctan(np.divide(RadarY,RadarX)))
# RadarAnglesV=np.abs(np.degrees(np.arctan(np.divide(RadarZ,RadarX)))) #will always be negative, so correct for it
if self.ImageExists==1:
# imageTemp = self.image
# print(imageTemp.shape)
# CameraX=RadarAnglesH*(self.image.shape[1]/self.H_FOV) + self.image.shape[1]/2 +self.HorzOffsetTXT# Number of pixels per degree,adjusted for shifting origin from centerline to top left
# CameraY=RadarAnglesV*(self.image.shape[0]/self.V_FOV) +256 +self.VertOffsetTXT -RadarX*np.sin(np.radians(4)) # Number of pixels per degree,adjusted for shifting origin from centerline to top left
#Write to File
bb_left=int(writeTracks.tracks[idx].yPx.data)
bb_top=int(writeTracks.tracks[idx].zPx.data)
bb_width=int(writeTracks.tracks[idx].width.data)
bb_height=int(writeTracks.tracks[idx].height.data)
x=-1 # Fillers
y=-1
z=-1
conf=1
outLine=str(frame)+' '+str(id)+' '+str(bb_left)+' '+str(bb_top)+' '+str(bb_width)+' '+str(bb_height)+' '+str(conf)+' '+str(x)+' '+str(y)+' '+str(z)+'\n'
# print(outLine)
self.DestF.write(outLine)
def CamIOUcheck(self,checkIdx):
#Return boolean. checks if IOU of given SensorIndex over any Current tracks is greater than threshold
# if it is, then returns false
outBool2=True # By default
#TODO: perform check if required
return outBool2
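# A minimal IOU sketch for the TODO above, assuming boxes come as
# (xmin, ymin, xmax, ymax) tuples; not wired into CamIOUcheck yet:
# def bbox_iou(a, b):
#     ix = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
#     iy = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
#     inter = ix * iy
#     union = (a[2]-a[0])*(a[3]-a[1]) + (b[2]-b[0])*(b[3]-b[1]) - inter
#     return inter / union if union > 0 else 0.0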
def trackInitiator(self,SensorData):
if not any(SensorData):
return
elif isinstance(SensorData[0],CamObj):
if hasattr(self, 'InitiatedCamTracks'):
# Then, move to current tracks based on NN-style gating
toDel=[]
InitiatedCamTracks=self.InitiatedCamTracks
# first build array of all sensor indices that are within validation gate of current tracks
if hasattr(self,'CurrentCamTracks'):
TempCurrTracks=self.CurrentCamTracks
SensorIndicesInit=[]
for cdx in range(len(TempCurrTracks.tracks)):
SensorIndicesInit.append(self.ValidationGate(SensorData,TempCurrTracks.tracks[cdx]))
else:
SensorIndicesInit=[]
for idx in range(len(InitiatedCamTracks.tracks)):
R=[]
Ridx=[] # maps entries of R back to SensorData indices (gated readings are skipped below)
if len(SensorData)==0:
continue
for jdx in range(len(SensorData)):
# If the sensor reading is already inside the validation gate of any current track, skip adding it to InitiatedTracks
if self.InitSensorValidator(SensorIndicesInit,jdx):
continue
else:
R.append(np.sqrt((InitiatedCamTracks.tracks[idx].yPx.data-(SensorData[jdx].xmax+SensorData[jdx].xmin)/2)**2 \
+(InitiatedCamTracks.tracks[idx].zPx.data-(SensorData[jdx].ymax+SensorData[jdx].ymin)/2)**2))
Ridx.append(jdx)
if len(R)==0:
R=9000 #Arbitrarily large value
R=np.asarray(R)
if (np.min(R)<self.trackInitCamThresh): # Then move this to current track # Inherent assumption here is that only one will be suitable
jdx=Ridx[int(np.argmin(R))] # map the argmin back to the original SensorData index
if not hasattr(self, 'CurrentCamTracks'):
self.CurrentCamTracks=trackArrayCam()
delT=self.imageTime.stamp-InitiatedCamTracks.header.stamp
delT=delT.to_sec()
self.CurrentCamTracks.header=SensorData[jdx].header
InitiatedCamTracks.tracks[idx].Stat.data=1 # Moving to current track
# Update the track with new sensor data before pushing to Current tracks
InitiatedCamTracks.tracks[idx].VyPx.data=\
(InitiatedCamTracks.tracks[idx].yPx.data-(SensorData[jdx].xmax+SensorData[jdx].xmin)/2)/delT
InitiatedCamTracks.tracks[idx].VzPx.data=\
(InitiatedCamTracks.tracks[idx].zPx.data-(SensorData[jdx].ymax+SensorData[jdx].ymin)/2)/delT
InitiatedCamTracks.tracks[idx].widthDot.data=\
(InitiatedCamTracks.tracks[idx].width.data-(SensorData[jdx].xmax-SensorData[jdx].xmin))/delT
InitiatedCamTracks.tracks[idx].heightDot.data=\
(InitiatedCamTracks.tracks[idx].height.data-(SensorData[jdx].ymax-SensorData[jdx].ymin))/delT
InitiatedCamTracks.tracks[idx].height.data=(SensorData[jdx].ymax-SensorData[jdx].ymin)
InitiatedCamTracks.tracks[idx].width.data=(SensorData[jdx].xmax-SensorData[jdx].xmin)
InitiatedCamTracks.tracks[idx].yPx.data=(SensorData[jdx].xmax+SensorData[jdx].xmin)/2
InitiatedCamTracks.tracks[idx].zPx.data=(SensorData[jdx].ymax+SensorData[jdx].ymin)/2
InitiatedCamTracks.tracks[idx].confidence=SensorData[jdx].confidence
Pk=np.diag([5,5,5,5,50,50,50,50]) # Initial covariance matrix
InitiatedCamTracks.tracks[idx].P=Mat_buildROS(Pk)
self.CurrentCamTracks.tracks=np.append(self.CurrentCamTracks.tracks,InitiatedCamTracks.tracks[idx])
toDel.append(idx)
SensorData=np.delete(SensorData,jdx) # delete the consumed reading by its original index
else: # for this idx of InitiatedCamTrack, the last jdx, so no measurements are nearby; delete the idx
toDel.append(idx)
#Clean all InitiatedCamTracks using toDel
self.InitiatedCamTracks.tracks=np.delete(InitiatedCamTracks.tracks,toDel)
#Remove old initiated tracks (if idle for more than 3 time steps):
toDel2=[]
for idx in range(len(self.InitiatedCamTracks.tracks)):
self.InitiatedCamTracks.tracks[idx].Stat.data=self.InitiatedCamTracks.tracks[idx].Stat.data-1
if self.InitiatedCamTracks.tracks[idx].Stat.data<0:
toDel2.append(idx)
self.InitiatedCamTracks.tracks=np.delete(self.InitiatedCamTracks.tracks,toDel2)
# Then concatenate the remaining sensor data for future initiation
if len(SensorData)==0:
return
self.InitiatedCamTracks.header=SensorData[0].header
for idx in range(len(SensorData)):
self.InitiatedCamTracks.tracks=np.append(self.InitiatedCamTracks.tracks,trackCam())
self.InitiatedCamTracks.tracks[-1].Stat.data= -1 # InitiatedTrack
self.InitiatedCamTracks.tracks[-1].yPx.data=(SensorData[idx].xmax+SensorData[idx].xmin)/2
self.InitiatedCamTracks.tracks[-1].zPx.data=(SensorData[idx].ymax+SensorData[idx].ymin)/2
self.InitiatedCamTracks.tracks[-1].VyPx.data=0
self.InitiatedCamTracks.tracks[-1].VzPx.data=0
self.InitiatedCamTracks.tracks[-1].width.data=(SensorData[idx].xmax-SensorData[idx].xmin)
self.InitiatedCamTracks.tracks[-1].widthDot.data=0
self.InitiatedCamTracks.tracks[-1].height.data=(SensorData[idx].ymax-SensorData[idx].ymin)
self.InitiatedCamTracks.tracks[-1].heightDot.data=0
self.InitiatedCamTracks.tracks[-1].confidence=SensorData[idx].confidence
else: # Start of algorithm, no tracks
self.InitiatedCamTracks=trackArrayCam()
self.InitiatedCamTracks.header=SensorData[0].header
for idx in range(len(SensorData)):
self.InitiatedCamTracks.tracks=np.append(self.InitiatedCamTracks.tracks,trackCam())
self.InitiatedCamTracks.tracks[-1].Stat.data=-1 # Initiated Track
self.InitiatedCamTracks.tracks[-1].yPx.data=(SensorData[idx].xmax+SensorData[idx].xmin)/2
self.InitiatedCamTracks.tracks[-1].zPx.data=(SensorData[idx].ymax+SensorData[idx].ymin)/2
self.InitiatedCamTracks.tracks[-1].VyPx.data=0
self.InitiatedCamTracks.tracks[-1].VzPx.data=0
self.InitiatedCamTracks.tracks[-1].width.data=(SensorData[idx].xmax-SensorData[idx].xmin)
self.InitiatedCamTracks.tracks[-1].widthDot.data=0
self.InitiatedCamTracks.tracks[-1].height.data=(SensorData[idx].ymax-SensorData[idx].ymin)
self.InitiatedCamTracks.tracks[-1].heightDot.data=0
self.InitiatedCamTracks.tracks[-1].confidence=SensorData[idx].confidence
elif isinstance(SensorData[0],RadarObj) or isinstance(SensorData[0],RadarObjMKZ):
if hasattr(self, 'InitiatedRdrTracks'):# Some (or Zer0) tracks already exists (i.e, not start of algorithm)
toDel=[]
InitiatedRdrTracks=self.InitiatedRdrTracks
# first build array of all sensor indices that are within validation gate of current tracks
if hasattr(self,'CurrentRdrTracks'):
TempCurrTracksRdr=self.CurrentRdrTracks
SensorIndicesInitRdr=[]
for cdx in range(len(TempCurrTracksRdr.tracks)):
SensorIndicesInitRdr.append(self.ValidationGate(SensorData,TempCurrTracksRdr.tracks[cdx]))
else:
SensorIndicesInitRdr=[]
for idx in range(len(self.InitiatedRdrTracks.tracks)):
gateValX=[]
gateValY=[]
gateValRMS=[]
gateIdx=[] # maps gateVal entries back to SensorData indices
# Find all sensor objects within some gate
if len(SensorData)==0:
continue
for jdx in range(len(SensorData)):
if self.InitSensorValidator(SensorIndicesInitRdr,jdx):
continue
else:
gateValX.append(np.abs(SensorData[jdx].pose.position.x-self.InitiatedRdrTracks.tracks[idx].x.data))
gateValY.append(np.abs(SensorData[jdx].pose.position.y-self.InitiatedRdrTracks.tracks[idx].y.data))
gateValRMS.append(np.sqrt((gateValX[-1])**2+(gateValY[-1])**2))
gateIdx.append(jdx)
if len(gateValRMS)==0:
gateValRMS=1000# Arbitrary large value, greater than trackInitRdrThresh
if (np.min(np.array(gateValRMS))<=self.trackInitRdrThresh): # @50Hz, 20m/s in X dir and 10m/s in Y-Direction as validation gate
#If gate is satisfied, move to CurrentRdrTracks after initiating P and deleting that SensorData[idx]
self.InitiatedRdrTracks.tracks[idx].P=Mat_buildROS(np.array([[3,0,0,0],[0,3,0,0],[0,0,3,0],[0,0,0,1]]))
#(Large uncertainty given to Beta. Others conservatively picked based on the Delphi ESR spec sheet)
self.InitiatedRdrTracks.tracks[idx].Stat.data=1# Moving to CurrentRdrTracks
x=self.InitiatedRdrTracks.tracks[idx].x.data
y=self.InitiatedRdrTracks.tracks[idx].y.data
Vc=self.InitiatedRdrTracks.tracks[idx].Vc.data
Beta=self.InitiatedRdrTracks.tracks[idx].B.data
psi=np.array([self.psi])
psiD=np.array([self.psiD])
Vt=self.Vt
posNorm=np.sqrt(x**2+y**2)
H31=(Vc*np.sin((psi-Beta).astype(float))*y**2-x*y*(Vc*np.cos((psi-Beta).astype(float))-Vt))/(posNorm**3)
H32=(-Vc*np.sin((psi-Beta).astype(float))*x*y+x**2*(Vc*np.cos((psi-Beta).astype(float))-Vt))/(posNorm**3)
H33=x*np.sin((psi-Beta).astype(float))/posNorm+y*np.cos((psi-Beta).astype(float))/posNorm
H34=(-x*Vc*np.cos((psi-Beta).astype(float))+y*Vc*np.sin((psi-Beta).astype(float)))/posNorm
Hk=np.array([[1,0,0,0],[x/posNorm,y/posNorm,0,0],[H31,H32,H33,H34]])
self.InitiatedRdrTracks.tracks[idx].H=Mat_buildROS(Hk)
if hasattr(self, 'CurrentRdrTracks'):
pass
else:
self.CurrentRdrTracks=trackArrayRdr()
self.CurrentRdrTracks.header=self.InitiatedRdrTracks.header
self.CurrentRdrTracks.tracks=np.append(self.CurrentRdrTracks.tracks,self.InitiatedRdrTracks.tracks[idx])
#Build Arrays for deletion:
toDel.append(idx)
#Also Delete the corresponding SensorData value:
SensorData=np.delete(SensorData,gateIdx[int(np.argmin(gateValRMS))]) # delete by the original SensorData index
else: # none of the SensorData is close to InitiatedRdrTracks[idx], so delete it
toDel.append(idx)
# Clean all InitiatedRdrTracks with status 1
self.InitiatedRdrTracks.tracks=np.delete(self.InitiatedRdrTracks.tracks,toDel)
#Remove old initiated tracks:(if idle for more than 2 time steps):
toDel2=[]
for idx in range(len(self.InitiatedRdrTracks.tracks)):
self.InitiatedRdrTracks.tracks[idx].Stat.data=self.InitiatedRdrTracks.tracks[idx].Stat.data-1
if self.InitiatedRdrTracks.tracks[idx].Stat.data<=-3:
toDel2.append(idx)
self.InitiatedRdrTracks.tracks=np.delete(self.InitiatedRdrTracks.tracks,toDel2)
# Then concatenate the remaining sensor data for future initiation
if len(SensorData)==0:
return
self.InitiatedRdrTracks.header=SensorData[0].header
for idx in range(len(SensorData)):
self.InitiatedRdrTracks.tracks=np.append(self.InitiatedRdrTracks.tracks,trackRdr())
self.InitiatedRdrTracks.tracks[-1].Stat.data= -1 # InitiatedTrack
self.InitiatedRdrTracks.tracks[-1].x.data=SensorData[idx].pose.position.x
self.InitiatedRdrTracks.tracks[-1].y.data=SensorData[idx].pose.position.y
self.InitiatedRdrTracks.tracks[-1].Vc.data=np.sqrt(SensorData[idx].vx_comp**2+SensorData[idx].vy_comp**2)
self.InitiatedRdrTracks.tracks[-1].B.data=self.psi -(np.arctan(SensorData[idx].pose.position.y/(0.0001 if (SensorData[idx].pose.position.x)==0.0 else (SensorData[idx].pose.position.x))))
# TODO: Improve Beta estimate by taking into account relative Vx (invert heading if the object is traveling towards the car)
else: # Start of algorithm, no tracks
self.InitiatedRdrTracks=trackArrayRdr()
self.InitiatedRdrTracks.header=SensorData[0].header
for idx in range(len(SensorData)):
self.InitiatedRdrTracks.tracks=np.append(self.InitiatedRdrTracks.tracks,trackRdr())
self.InitiatedRdrTracks.tracks[-1].Stat.data= -1 # InitiatedTrack
self.InitiatedRdrTracks.tracks[-1].x.data=SensorData[idx].pose.position.x
self.InitiatedRdrTracks.tracks[-1].y.data=SensorData[idx].pose.position.y
self.InitiatedRdrTracks.tracks[-1].Vc.data=np.sqrt(SensorData[idx].vx_comp**2+SensorData[idx].vy_comp**2)
self.InitiatedRdrTracks.tracks[-1].B.data=self.psi -(np.arctan(SensorData[idx].pose.position.y/(0.0001 if (SensorData[idx].pose.position.x)==0.0 else (SensorData[idx].pose.position.x))))
# TODO: Improve Beta estimate by taking into account relative Vx (invert heading if the object is traveling towards the car)
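# Note on the Beta estimate above: np.arctan only covers (-pi/2, pi/2) and needs the
# 0.0001 guard against division by zero. np.arctan2 avoids both issues and returns the
# correct quadrant; a hedged drop-in for the expression above would be:
# self.InitiatedRdrTracks.tracks[-1].B.data = self.psi - np.arctan2(
#     SensorData[idx].pose.position.y, SensorData[idx].pose.position.x)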
def trackDestructor(self,SensorData):
if not any(SensorData):
return
if isinstance(SensorData[0],CamObj):
if not (hasattr(self,'CurrentCamTracks')):
return
toDel=[]
for idx in range(len(self.CurrentCamTracks.tracks)):
if self.CurrentCamTracks.tracks[idx].Stat.data>=2:# Testing, made less persistent
toDel.append(idx)
self.CurrentCamTracks.tracks=np.delete(self.CurrentCamTracks.tracks,toDel)
elif isinstance(SensorData[0],RadarObj) or isinstance(SensorData[0],RadarObjMKZ):
if not(hasattr(self,'CurrentRdrTracks')):
return
toDel=[]
for idx in range(len(self.CurrentRdrTracks.tracks)):
if self.CurrentRdrTracks.tracks[idx].Stat.data>=4: # If no measurements associated for 4 steps
toDel.append(idx)
self.CurrentRdrTracks.tracks=np.delete(self.CurrentRdrTracks.tracks,toDel)
def trackMaintenance(self,SensorData):
if not any(SensorData):
return
if isinstance(SensorData[0],CamObj):
if not hasattr(self, 'CurrentCamTracks'):
return
SensorIndices=[]
for idx in range(len(self.CurrentCamTracks.tracks)):
SensorIndices.append(self.ValidationGate(SensorData,self.CurrentCamTracks.tracks[idx]))#Clean the incoming data - outputs 2D python array
# Above yields array of possible measurments (only indices) corresponding to a particular track
# startTime1=time.time()
self.KalmanEstimate(SensorData,SensorIndices, self.Method) # Includes DataAssociation Calcs
# print('Time for KalmanEstimate:' + str(time.time()-startTime1))
# startTime2=time.time()
self.KalmanPropagate(SensorData)
# print('Time for KalmanPropagate:' + str(time.time()-startTime2))
self.TrackPubCam.publish(self.CurrentCamTracks)
elif isinstance(SensorData[0],RadarObj) or isinstance(SensorData[0],RadarObjMKZ):
if not hasattr(self, 'CurrentRdrTracks'):
return
SensorIndices=[]
for idx in range(len(self.CurrentRdrTracks.tracks)):
SensorIndices.append(self.ValidationGate(SensorData,self.CurrentRdrTracks.tracks[idx]))#Clean the incoming data - outputs 2D python array
# Above yields array of possible measurments (only indices) corresponding to a particular track
# startTimeKE=time.time()
self.KalmanEstimate(SensorData,SensorIndices, self.Method) # Includes DataAssociation Calcs
# print('Time for KalmanEstimate:' + str(time.time()-startTimeKE))
# startTimeKP=time.time()
self.KalmanPropagate(SensorData)
# print('Time for KalmanPropagate:' + str(time.time()-startTimeKP))
# self.TrackPubRdr.publish(header=self.CurrentRdrTracks.header, tracks =self.CurrentRdrTracks.tracks)
# rospy.loginfo_once('Current tracks published to topic /dataAssoc')
def InitSensorValidator(self,SensorIndicesInit,jdx):
#takes SensorIndices 2 D python array and current Sensor index being checked;
# returns true if the current index is in the 2D array
outBool=False # By default
if len(SensorIndicesInit)==0:
return outBool
for sens_idx in range(len(SensorIndicesInit)):
if jdx in SensorIndicesInit[sens_idx]:
outBool=True
return outBool
def trackPlotter(self):
if not (hasattr(self,'image')) or (self.PlotArg=='0'):
return # Skip function call if image is not available or plotting is disabled
LocalImage=self.image
if (self.PlotArg=='3') or (self.PlotArg=='4'): # Then, plot Radar stuff
if not hasattr(self,'CurrentRdrTracks'):
return # Skip
CurrentRdrTracks=self.CurrentRdrTracks
n=len(CurrentRdrTracks.tracks)
RadarAnglesH=np.zeros((n,1))
RadarAnglesV=np.zeros((n,1))
# Camera Coordinates: X is horizontal, Y is vertical starting from left top corner
CirClr=[]
for idx1 in range(len(CurrentRdrTracks.tracks)):
temp1=np.divide(CurrentRdrTracks.tracks[idx1].y.data,CurrentRdrTracks.tracks[idx1].x.data)
RadarAnglesH[idx1]=-np.degrees(np.arctan(temp1.astype(float)))
temp2=np.divide(self.CamZoffset,CurrentRdrTracks.tracks[idx1].x.data+self.CamXOffset)
RadarAnglesV[idx1]=np.abs(np.degrees(np.arctan(temp2.astype(float)))) #will always be negative, so correct for it
if (CurrentRdrTracks.tracks[idx1].Stat.data>=1) and (CurrentRdrTracks.tracks[idx1].Stat.data<14): #Current Track- green
CirClr.append(np.array([0,255,0]))
elif CurrentRdrTracks.tracks[idx1].Stat.data<=0: # Candidate Tracks for initialization - blue
CirClr.append(np.array([255,0,0]))
else: # Candidate for Destructor-orange
CirClr.append(np.array([0,165,255]))
CameraX=np.dot(RadarAnglesH,(self.image.shape[1]/self.CamFOV)) + self.image.shape[1]/2 # Number of pixels per degree,adjusted for shifting origin from centerline to top left
CameraY=np.dot(RadarAnglesV,(self.image.shape[0]/(39.375))) +480/2 # Number of pixels per degree,adjusted for shifting origin from centerline to top left
CirClr=np.array(CirClr)
CameraX=np.array(CameraX)
for idx3 in range(len(CameraX)):
if (CameraX[idx3]<=self.image.shape[1]):
LocalImage=cv2.circle(LocalImage, (int(CameraX[idx3]),int(CameraY[idx3])), 12, CirClr[idx3].tolist(),3)
LocalImage=cv2.putText(LocalImage,str(idx3),(int(CameraX[idx3]),int(CameraY[idx3])),self.font,1,(255,105,180),2)
#Now Plot Camera Tracks:
if (self.PlotArg=='2') or (self.PlotArg=='4'): # Then, plot Cam stuff
if not hasattr(self,'CurrentCamTracks'):
return # Skip
CurrentCamTracks=self.CurrentCamTracks
RectClr=[]
for jdx in range(len(CurrentCamTracks.tracks)):
if (CurrentCamTracks.tracks[jdx].Stat.data>=1) and (CurrentCamTracks.tracks[jdx].Stat.data<14): #Current Track- green
RectClr.append(np.array([0,255,0]))
elif CurrentCamTracks.tracks[jdx].Stat.data<=0: # Candidate Tracks for initialization - blue
RectClr.append(np.array([255,0,0]))
else: # Candidate for Destructor-orange
RectClr.append(np.array([0,165,255]))
for idx2 in range(len(CurrentCamTracks.tracks)):
start=(int(CurrentCamTracks.tracks[idx2].yPx.data-CurrentCamTracks.tracks[idx2].width.data/2),int(CurrentCamTracks.tracks[idx2].zPx.data-CurrentCamTracks.tracks[idx2].height.data/2))
end= (int(CurrentCamTracks.tracks[idx2].yPx.data+CurrentCamTracks.tracks[idx2].width.data/2),int(CurrentCamTracks.tracks[idx2].zPx.data+CurrentCamTracks.tracks[idx2].height.data/2))
LocalImage=cv2.rectangle(LocalImage,start,end,RectClr[idx2].tolist(),2)
if (self.PlotArg=='1') or (self.PlotArg=='4'): # Only plot self.CombinedTracks
if not hasattr(self,'CombinedTracks'):
return
currCombinedTracks=self.CombinedTracks
RectClr=[]
for jdx in range(len(currCombinedTracks.tracks)):
RectClr.append(np.array([102,255,255])) # Yellow
for idx2 in range(len(currCombinedTracks.tracks)):
start=(int(currCombinedTracks.tracks[idx2].yPx.data-currCombinedTracks.tracks[idx2].width.data/2),int(currCombinedTracks.tracks[idx2].zPx.data-currCombinedTracks.tracks[idx2].height.data/2))
end= (int(currCombinedTracks.tracks[idx2].yPx.data+currCombinedTracks.tracks[idx2].width.data/2),int(currCombinedTracks.tracks[idx2].zPx.data+currCombinedTracks.tracks[idx2].height.data/2))
LocalImage=cv2.rectangle(LocalImage,start,end,RectClr[idx2].tolist(),2)
self.image_pub.publish(self.bridge.cv2_to_imgmsg(LocalImage, "bgr8"))
rospy.loginfo_once('Image is being published')
def CamRdrCombine(self):
if not hasattr(self,'CurrentCamTracks') or (not hasattr(self,'CurrentRdrTracks')):
return
self.CombinedTracks=trackArrayCam()
n=len(self.CurrentCamTracks.tracks)
LocalRdrYArr=[]
for rdx in range(len(self.CurrentRdrTracks.tracks)):
temp1=np.divide(self.CurrentRdrTracks.tracks[rdx].y.data,self.CurrentRdrTracks.tracks[rdx].x.data)
temp2=-np.degrees(np.arctan(temp1.astype(float)))
LocalRdrYArr.append(np.dot(temp2,(self.image.shape[1]/self.CamFOV)) + self.image.shape[1]/2+self.HorzOffset) # Gives all Y-coord (pixels) of all radar tracks
for jdx in range(n):
radius=(self.CurrentCamTracks.tracks[jdx].width.data+self.CurrentCamTracks.tracks[jdx].height.data)/2+self.CombGateThresh
centerY=self.CurrentCamTracks.tracks[jdx].yPx.data
for Rdx in range(len(LocalRdrYArr)):
if (abs(LocalRdrYArr[Rdx]-centerY)<=radius) or (self.CurrentCamTracks.tracks[jdx].confidence>=0.36):
self.CurrentCamTracks.tracks[jdx].Stat.data=99 #To indicate that the status is combined/validated
#TODO: Create a custom CombinedTracks Message that has both radar and Camera info?
self.CombinedTracks.tracks.append(self.CurrentCamTracks.tracks[jdx])
break
else:
continue
def trackManager(self,SensorData):
# startTime01=time.time()
self.trackMaintenance(SensorData)
# print('Time for Track Maint:' + str(time.time()-startTime01))
# startTime02=time.time()
self.trackInitiator(SensorData)
# print('Time for Track Init:' + str(time.time()-startTime02))
# startTime03=time.time()
self.trackDestructor(SensorData)
# print('Time for Track Destr:' + str(time.time()-startTime03))
# startTime04=time.time()
self.trackPlotter()
# print('Time for Track Plotter:' + str(time.time()-startTime04))
# startTime05=time.time()
if hasattr(self,'CurrentCamTracks') or hasattr(self,'CurrentRdrTracks'):
s= '# Cam Tracks: ' + (str(len(self.CurrentCamTracks.tracks)) if hasattr(self,'CurrentCamTracks') else 'None') + \
'; Rdr Tracks: ' + (str(len(self.CurrentRdrTracks.tracks)) if hasattr(self,'CurrentRdrTracks') else 'None') +'; # Combined Tracks:'\
+(str(len(self.CombinedTracks.tracks)) if hasattr(self,'CombinedTracks') else 'None')
print(s)
# print('Time printing in track manager:' + str(time.time()-startTime05))
def DataAssociation(self,SensorData,SensorIndices,Method):
if Method=="Hungarian":
pass
elif Method=="JPDA":
#Build A Validation Matrix if there are sufficient sensor data and tracks
if (len(SensorData)<1) or (len(self.CurrentRdrTracks.tracks)<1):
Yk=[]
else:
Yk=[]
#create empty Yk list, with given number of targets (currentTracks):
for l_dx in range(len(self.CurrentRdrTracks.tracks)):
Yk.append([])
C=3 # Number of false measurements per unit volume (assume), clutter density
Pd=0.9 #Probability of detection
# Create Clusters by cycling through SensorIndices, maintain
OpenList=[]
ClusterList=[]
for tempdx in range(len(self.CurrentRdrTracks.tracks)):
OpenList.append(tempdx)
OpenList=np.array(OpenList)
while OpenList.size>0: # use size: any(np.array([0])) is False and would wrongly drop track index 0
tempClusterList=[]
tempClusterList.append(OpenList[0])
SensorRdgList=np.array(SensorIndices[OpenList[0]]).flatten()
OpenList=np.delete(OpenList,0) # Remove this element from searchable list of tracks, will be added later to ClusterList
# Chase down all other tracks that share common sensor measurements
n_meas=len(SensorData) # Total number of possible measurements
for m_dx in range(n_meas):
if m_dx in SensorRdgList:
ToDelOpenList=[]
for cluster_dx in OpenList:
indices = [i for i, obj in enumerate(SensorIndices[cluster_dx]) if obj == m_dx]
if (len(indices)>0) and (not (cluster_dx in tempClusterList)):
tempClusterList.append(cluster_dx)
ToDelOpenList.append(cluster_dx) # To be Deleted from OpenList
SensorRdgList=np.append(SensorRdgList,SensorIndices[cluster_dx]).flatten() # grow the shared-measurement list so the chain continues
OpenList=np.setdiff1d(OpenList,ToDelOpenList) # Remove from OpenList
else:
continue
# Now add this cluster to ClusterList
ClusterList.append(tempClusterList)
### Directly calculate Bjt for cluster sizes 1 to 3, as per the Bose paper
# First calculate Yjt and Sjt:
# Calculate S_jt for every track; S_t = H P H^T + R depends only on t, not j,
# so allocate the array once and fill one entry per track
Sjt=np.zeros((len(self.CurrentRdrTracks.tracks),3,3))
for tdx in range(len(self.CurrentRdrTracks.tracks)):
Hk=Mat_extractROS(self.CurrentRdrTracks.tracks[tdx].H)
Pk=Mat_extractROS(self.CurrentRdrTracks.tracks[tdx].P)
Sjt[tdx]=np.matmul(np.matmul(Hk,Pk),Hk.T)+self.R_rdr
def PjtCalc(meas_dx,target_dx,YjtLocal,Sjt):
if meas_dx in SensorIndices[target_dx]:
# Gaussian likelihood of the innovation: the Mahalanobis term uses the inverse of S,
# and the 3-D normalizer is sqrt((2*pi)^3 * det(S))
Pjt=Pd*np.exp(-np.matmul(np.matmul(YjtLocal[:,meas_dx].T,np.linalg.inv(Sjt[target_dx])),YjtLocal[:,meas_dx])/2)/(np.sqrt(((2*np.pi)**3)*np.linalg.det(Sjt[target_dx])))
else:
Pjt=0
return Pjt
def GjCal(meas_dx,target_dx1, target_dx2,YjtLocal,Sjt):
Gj=PjtCalc(meas_dx,target_dx1,YjtLocal,Sjt)*PjtCalc(meas_dx,target_dx2,YjtLocal,Sjt)
return Gj
def YjtCalc(t_idx):
yt=np.array([self.CurrentRdrTracks.tracks[t_idx].x.data,self.CurrentRdrTracks.tracks[t_idx].y.data, \
self.CurrentRdrTracks.tracks[t_idx].Vc.data]).reshape(3,1)
Yjt=np.zeros((3,len(SensorIndices[t_idx])))
for jdx in range(len(SensorIndices[t_idx])):
yjt=np.array([SensorData[SensorIndices[t_idx][jdx]].pose.position.x,SensorData[SensorIndices[t_idx][jdx]].pose.position.y, \
np.sqrt(SensorData[SensorIndices[t_idx][jdx]].vx_comp**2+SensorData[SensorIndices[t_idx][jdx]].vy_comp**2)]).reshape(3,1)
Yjt[:,jdx]=(yjt-yt).reshape(3)
return Yjt
for clusterItem in ClusterList:
if len(clusterItem)==1:
B0t=C*(1-Pd)
Yjt=YjtCalc(clusterItem[0])
c=B0t
if len(SensorIndices[clusterItem[0]])>0:
Z_temp=np.zeros_like(Yjt[:,0])
for j_idx in range(len(SensorIndices[clusterItem[0]])):
Bjt=PjtCalc(j_idx,clusterItem[0],Yjt,Sjt)
c=c+Bjt
Z_temp=Z_temp+Bjt*Yjt[:,j_idx]
Yk[clusterItem[0]]=Z_temp/c
else: # No measurement associated with this particular object in clusterItem
pass # Already Yk[clusterItem[0]] =[] by default
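# For a single-track cluster the weights above follow the standard JPDA form:
#   B0t = C*(1-Pd)             -> probability mass of the "no detection" event
#   Bjt = Pd * N(yjt; 0, Sjt)  -> likelihood of measurement j for track t
#   fused innovation = (sum_j Bjt*yjt) / (B0t + sum_j Bjt)
# which is exactly what the Z_temp/c accumulation computes.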
elif len(clusterItem)==2:
P0=C*(1-Pd)
P1=P0
P2=P0
#Build P1:
Yjt1=YjtCalc(clusterItem[0])
for jdx in range(len(SensorIndices[clusterItem[0]])):
P1=P1+PjtCalc(jdx,clusterItem[0],Yjt1,Sjt)
# Build P2:
Yjt2=YjtCalc(clusterItem[1])
for jdx in range(len(SensorIndices[clusterItem[1]])):
P2=P2+PjtCalc(jdx,clusterItem[1],Yjt2,Sjt)
# Now build Bjts:
B0t1=P0*P2
c1=B0t1
# Calculate Bjt1:
Z_temp=np.zeros_like(Yjt1[:,0])
for j_idx in range(len(SensorIndices[clusterItem[0]])):
Bjt1=PjtCalc(j_idx,clusterItem[0],Yjt1,Sjt)*(P2-PjtCalc(j_idx,clusterItem[1],Yjt1,Sjt))
c1=c1+Bjt1
Z_temp=Z_temp+Bjt1*Yjt1[:,j_idx]
# Add to Yk:
Yk[clusterItem[0]]=Z_temp/c1
# Now Calculate Bjt2:
B0t2=P0*P1
c2=B0t2
Z_temp=np.zeros_like(Yjt2[:,0])
for j_idx in range(len(SensorIndices[clusterItem[1]])):
Bjt2=PjtCalc(j_idx,clusterItem[1],Yjt2,Sjt)*(P1-PjtCalc(j_idx,clusterItem[0],Yjt2,Sjt))
c2=c2+Bjt2
Z_temp=Z_temp+Bjt2*Yjt2[:,j_idx]
# Add to Yk:
Yk[clusterItem[1]]=Z_temp/c2 # normalize by this track's own constant
elif len(clusterItem)==3:
# Build P's:
P0=C*(1-Pd)
P1=P0
P2=P0
P3=P0
#Build P1:
Yjt1=YjtCalc(clusterItem[0])
for jdx in range(len(SensorIndices[clusterItem[0]])):
P1=P1+PjtCalc(jdx,clusterItem[0],Yjt1,Sjt)
# Build P2:
Yjt2=YjtCalc(clusterItem[1])
for jdx in range(len(SensorIndices[clusterItem[1]])):
P2=P2+PjtCalc(jdx,clusterItem[1],Yjt2,Sjt)
# Build P3:
Yjt3=YjtCalc(clusterItem[2])
for jdx in range(len(SensorIndices[clusterItem[2]])):
P3=P3+PjtCalc(jdx,clusterItem[2],Yjt3,Sjt)
# Now Build G's (pairwise joint-likelihood terms):
G23=0
for jdx in range(len(SensorIndices[clusterItem[0]])):
G23=G23+GjCal(jdx,clusterItem[1],clusterItem[2],Yjt1,Sjt)
G13=0
for jdx in range(len(SensorIndices[clusterItem[1]])):
G13=G13+GjCal(jdx,clusterItem[0],clusterItem[2],Yjt2,Sjt)
G12=0
for jdx in range(len(SensorIndices[clusterItem[2]])):
G12=G12+GjCal(jdx,clusterItem[0],clusterItem[1],Yjt3,Sjt)
# Now Build Bjt's:
B0t1=P0*(P2*P3-G23)
c1=B0t1
B0t2=P0*(P1*P3-G13)
c2=B0t2
B0t3=P0*(P1*P2-G12)
c3=B0t3
Z_temp=np.zeros_like(Yjt1[:,0])
for j_idx in range(len(SensorIndices[clusterItem[0]])):
Bjt1=PjtCalc(j_idx,clusterItem[0],Yjt1,Sjt)*((P2-PjtCalc(j_idx,clusterItem[1],Yjt2,Sjt))*(P3-PjtCalc(j_idx,clusterItem[2],Yjt3,Sjt))\
-(G23-GjCal(j_idx,clusterItem[1],clusterItem[2],Yjt1,Sjt)))
c1=c1+Bjt1
Z_temp=Z_temp+Bjt1*Yjt1[:,j_idx]
Yk[clusterItem[0]]=Z_temp/c1
Z_temp=np.zeros_like(Yjt2[:,0])
for j_idx in range(len(SensorIndices[clusterItem[1]])):
Bjt2=PjtCalc(j_idx,clusterItem[1],Yjt2,Sjt)*((P1-PjtCalc(j_idx,clusterItem[0],Yjt1,Sjt))*(P3-PjtCalc(j_idx,clusterItem[2],Yjt3,Sjt))\
-(G13-GjCal(j_idx,clusterItem[0],clusterItem[2],Yjt2,Sjt)))
c2=c2+Bjt2
Z_temp=Z_temp+Bjt2*Yjt2[:,j_idx]
Yk[clusterItem[1]]=Z_temp/c2
Z_temp=np.zeros_like(Yjt3[:,0])
for j_idx in range(len(SensorIndices[clusterItem[2]])):
Bjt3=PjtCalc(j_idx,clusterItem[2],Yjt3,Sjt)*((P1-PjtCalc(j_idx,clusterItem[0],Yjt1,Sjt))*(P2-PjtCalc(j_idx,clusterItem[1],Yjt2,Sjt))\
-(G12-GjCal(j_idx,clusterItem[0],clusterItem[1],Yjt3,Sjt)))
c3=c3+Bjt3
Z_temp=Z_temp+Bjt3*Yjt3[:,j_idx]
Yk[clusterItem[2]]=Z_temp/c3
# If cluster size is greater than 3, use the approximation from the paper (TODO, if required)
else:
print('Large Cluster Density, Skipping Data Association!!')
pass
return Yk
elif Method=="Greedy": # Simple method that just outputs the closest UNUSED measurement
if isinstance(SensorData[0],RadarObj) or isinstance(SensorData[0],RadarObjMKZ):
# Sensor indices is a 2D python list, not numpy array
usedSensorIndices=[]
Yk=[] # A python list of sensor measurements corresponding to each CurrentTrack
for idx in range(len(self.CurrentRdrTracks.tracks)):
gateValX=[]
gateValY=[]
gateValRMS=[]
if len(SensorIndices[idx])==0:
Yk.append([])
continue
else:
# print(len(SensorIndices[idx]))
for jdx in range(len(SensorIndices[idx])):
gateValX.append(np.abs(SensorData[SensorIndices[idx][jdx]].pose.position.x-self.CurrentRdrTracks.tracks[idx].x.data))
gateValY.append(np.abs(SensorData[SensorIndices[idx][jdx]].pose.position.y-self.CurrentRdrTracks.tracks[idx].y.data))
gateValRMS.append(np.sqrt(((gateValX[jdx])**2+(gateValY[jdx])**2).astype(float)))
if np.min(gateValRMS)<=self.GateThreshRdr:
# gateValRMS is indexed over the gated candidates, so map argmin back to SensorData via SensorIndices[idx]
candIdx=list(SensorIndices[idx])
gateValRMS=np.array(gateValRMS)
pos=int(np.argmin(gateValRMS))
sensIdx=candIdx[pos]
temp=SensorData[sensIdx]
while sensIdx in usedSensorIndices: # measurement already claimed by another track; take the next closest
gateValRMS=np.delete(gateValRMS,pos)
del candIdx[pos]
if len(gateValRMS)==0:
temp=[]
break
pos=int(np.argmin(gateValRMS))
sensIdx=candIdx[pos]
temp=SensorData[sensIdx]
usedSensorIndices.append(sensIdx)
Yk.append(temp)
else:
Yk.append([])
elif isinstance(SensorData[0],CamObj): # Similar to the radar case above: gives the closest unused sensor index
# Sensor indices is a 2D python list, not numpy array
usedSensorIndices=[]
Yk=[] # A python list of sensor measurements corresponding to each CurrentTrack
for idx in range(len(self.CurrentCamTracks.tracks)):
gateValX=[]
gateValY=[]
gateValRMS=[]
if len(SensorIndices[idx])==0:
Yk.append([])
continue
else:
# print(len(SensorIndices[idx]))
for jdx in range(len(SensorIndices[idx])):
gateValX.append(np.abs((SensorData[SensorIndices[idx][jdx]].xmin+SensorData[SensorIndices[idx][jdx]].xmax)/2-self.CurrentCamTracks.tracks[idx].yPx.data))
gateValY.append(np.abs((SensorData[SensorIndices[idx][jdx]].ymin+SensorData[SensorIndices[idx][jdx]].ymax)/2-self.CurrentCamTracks.tracks[idx].zPx.data))
gateValRMS.append(np.sqrt(((gateValX[jdx])**2+(gateValY[jdx])**2).astype(float)))
if np.min(gateValRMS)<=self.GateThreshCam:
# gateValRMS is indexed over the gated candidates, so map argmin back to SensorData via SensorIndices[idx]
candIdx=list(SensorIndices[idx])
gateValRMS=np.array(gateValRMS)
pos=int(np.argmin(gateValRMS))
sensIdx=candIdx[pos]
temp=SensorData[sensIdx]
while sensIdx in usedSensorIndices: # measurement already claimed by another track; take the next closest
gateValRMS=np.delete(gateValRMS,pos)
del candIdx[pos]
if len(gateValRMS)==0:
temp=[]
break
pos=int(np.argmin(gateValRMS))
sensIdx=candIdx[pos]
temp=SensorData[sensIdx]
usedSensorIndices.append(sensIdx)
Yk.append(temp)
else:
Yk.append([])
# Yk=[]
# for idx in range(len(self.CurrentCamTracks.tracks)):
# if len(SensorIndices[idx])==0:
# Yk.append([])
# continue
# else:
# Yk.append(SensorData[SensorIndices[idx][0]])
return Yk # An Array with same len as CurrentRdrTracks.tracks[]
def KalmanPropagate(self,SensorData):
if isinstance(SensorData[0],CamObj) and hasattr(self,'CurrentRdrTracks'): # TODO: hasattr part is just so we can get delT, this needs to be fixed
delT=(SensorData[0].header.stamp-self.CurrentRdrTracks.header.stamp)
delT=delT.to_sec()
for idx in range(len(self.CurrentCamTracks.tracks)):
Fk=np.eye(8)
Fk[0,4]=delT
Fk[1,5]=delT
Fk[2,6]=delT
Fk[3,7]=delT
self.CurrentCamTracks.tracks[idx].P=Mat_buildROS(np.matmul(np.matmul(Fk,Mat_extractROS(self.CurrentCamTracks.tracks[idx].P)),Fk.T)+self.Q_cam) # P = F P F^T + Q (matrix product, not elementwise *)
track=self.CurrentCamTracks.tracks[idx]
StateVec=np.array([track.yPx.data,track.zPx.data,track.width.data,track.height.data,track.VyPx.data,\
track.VzPx.data,track.widthDot.data,track.heightDot.data])
StateVec=np.dot(Fk,StateVec.reshape(8,1))
self.CurrentCamTracks.tracks[idx].yPx.data=StateVec[0]
self.CurrentCamTracks.tracks[idx].zPx.data=StateVec[1]
self.CurrentCamTracks.tracks[idx].width.data=StateVec[2]
self.CurrentCamTracks.tracks[idx].height.data=StateVec[3]
self.CurrentCamTracks.tracks[idx].VyPx.data=StateVec[4]
self.CurrentCamTracks.tracks[idx].VzPx.data=StateVec[5]
self.CurrentCamTracks.tracks[idx].widthDot.data=StateVec[6]
self.CurrentCamTracks.tracks[idx].heightDot.data=StateVec[7]
elif isinstance(SensorData[0],RadarObj) or isinstance(SensorData[0],RadarObjMKZ):
delT=(SensorData[0].header.stamp-self.CurrentRdrTracks.header.stamp)
delT=delT.to_sec()
for idx in range(len(self.CurrentRdrTracks.tracks)):
x=self.CurrentRdrTracks.tracks[idx].x.data
y=self.CurrentRdrTracks.tracks[idx].y.data
Vc=self.CurrentRdrTracks.tracks[idx].Vc.data
Beta=self.CurrentRdrTracks.tracks[idx].B.data
psi=np.array([self.psi])
psiD=np.array([self.psiD])
Vt=self.Vt
F14=-delT*Vc*np.cos((psi-Beta).astype(float))
F24=delT*Vc*np.sin((psi-Beta).astype(float))
Fk=np.array([[1,delT*psiD,delT*np.sin((psi-Beta).astype(float)),F14],[-delT*psiD,1,delT*np.cos((psi-Beta).astype(float)),F24],[0,0,1,0],[0,0,0,1]])
self.CurrentRdrTracks.tracks[idx].F=Mat_buildROS(Fk)
self.CurrentRdrTracks.tracks[idx].P=Mat_buildROS(np.matmul(np.matmul(Fk,Mat_extractROS(self.CurrentRdrTracks.tracks[idx].P)),Fk.T)+self.Q_rdr*(delT**2)/0.01) # P = F P F^T + Q (matrix product, not elementwise *)
StateVec=np.array([x, y,Vc,Beta])
A=np.array([[0,psiD,np.sin((psi-Beta).astype(float)),0],[-psiD,0,0,np.cos((psi-Beta).astype(float))],[0,0,0,0],[0,0,0,0]])
StateVec=StateVec.reshape(4,1)+delT*(A.dot(StateVec.reshape(4,1))+np.array([[0],[Vt],[0],[0]]))
self.CurrentRdrTracks.tracks[idx].x.data=StateVec[0]
self.CurrentRdrTracks.tracks[idx].y.data=StateVec[1]
self.CurrentRdrTracks.tracks[idx].Vc.data=StateVec[2]
self.CurrentRdrTracks.tracks[idx].B.data=StateVec[3]
def KalmanEstimate(self,SensorData,SensorIndices, Method):
if isinstance(SensorData[0],CamObj):
Yk=self.DataAssociation(SensorData,SensorIndices,'Greedy') # The Camera always uses Greedy method
for idx in range(len(Yk)):
if not Yk[idx]: # No suitable measurements found, move to potential destruct
if self.CurrentCamTracks.tracks[idx].Stat.data<=4:
self.CurrentCamTracks.tracks[idx].Stat.data+=1
else:
self.CurrentCamTracks.tracks[idx].Stat.data=4
continue
else:
#Reset status of track as a suitable msrmt has been found
self.CurrentCamTracks.tracks[idx].Stat.data=1
track=self.CurrentCamTracks.tracks[idx]
StateVec=np.array([track.yPx.data,track.zPx.data,track.width.data,track.height.data,track.VyPx.data,\
track.VzPx.data,track.widthDot.data,track.heightDot.data]).reshape([8,1])
Hk=self.CamMsrtmMatrixH
Pk=Mat_extractROS(self.CurrentCamTracks.tracks[idx].P)
K=np.dot(np.dot(Pk,Hk.T),np.linalg.inv(np.dot(np.dot(Hk,Pk),Hk.T)+self.R_cam))
self.CurrentCamTracks.tracks[idx].K=Mat_buildROS(K)
YkdataAssocStateVec=np.array([(Yk[idx].xmax+Yk[idx].xmin)/2,\
(Yk[idx].ymax+Yk[idx].ymin)/2,\
(Yk[idx].xmax-Yk[idx].xmin),\
(Yk[idx].ymax-Yk[idx].ymin)]).reshape([4,1])
StateVec=StateVec+np.matmul(K,(YkdataAssocStateVec-np.matmul(Hk,StateVec)))
self.CurrentCamTracks.tracks[idx].yPx.data=StateVec[0]
self.CurrentCamTracks.tracks[idx].zPx.data=StateVec[1]
self.CurrentCamTracks.tracks[idx].width.data=StateVec[2]
self.CurrentCamTracks.tracks[idx].height.data=StateVec[3]
self.CurrentCamTracks.tracks[idx].VyPx.data=StateVec[4]
self.CurrentCamTracks.tracks[idx].VzPx.data=StateVec[5]
self.CurrentCamTracks.tracks[idx].widthDot.data=StateVec[6]
self.CurrentCamTracks.tracks[idx].heightDot.data=StateVec[7]
Pk=np.dot((np.eye(8)-np.dot(K,Hk)),Pk)
self.CurrentCamTracks.tracks[idx].P=Mat_buildROS(Pk)
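# Numerical note: the update above uses the simple form P = (I-KH)P. The Joseph
# form is symmetric and more robust to round-off; a hedged equivalent would be:
# IKH = np.eye(8) - np.dot(K, Hk)
# Pk = np.dot(np.dot(IKH, Pk), IKH.T) + np.dot(np.dot(K, self.R_cam), K.T)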
elif isinstance(SensorData[0],RadarObj) or isinstance(SensorData[0],RadarObjMKZ): # Use EKF from Truck Platooning paper:
# DatAscTime=time.time()
Yk=self.DataAssociation(SensorData,SensorIndices,Method) # Lists suitable measurements for each track
# print('Time for Data Assoc:' + str(time.time()-DatAscTime))
for idx in range(len(Yk)):
if ((Method=='JPDA') and len(Yk[idx])==0) or ((Method=='Greedy') and (Yk[idx]==[])): # No suitable measurements found, move to potential destruct
if self.CurrentRdrTracks.tracks[idx].Stat.data>=10:
self.CurrentRdrTracks.tracks[idx].Stat.data+=1
else:
self.CurrentRdrTracks.tracks[idx].Stat.data=10
continue
else:
#reset Status of track:
self.CurrentRdrTracks.tracks[idx].Stat.data=1
x=np.array(self.CurrentRdrTracks.tracks[idx].x.data).astype(float)
y=np.array(self.CurrentRdrTracks.tracks[idx].y.data).astype(float)
Vc=self.CurrentRdrTracks.tracks[idx].Vc.data
Beta=self.CurrentRdrTracks.tracks[idx].B.data
psi=np.array([self.psi])
psiD=np.array([self.psiD])
Vt=self.Vt
posNorm=np.sqrt(x**2+y**2)
H31=(Vc*np.sin((psi-Beta).astype(float))*y**2-x*y*(Vc*np.cos((psi-Beta).astype(float))-Vt))/(posNorm**3)
H32=(-Vc*np.sin((psi-Beta).astype(float))*x*y+x**2*(Vc*np.cos((psi-Beta).astype(float))-Vt))/(posNorm**3)
H33=x*np.sin((psi-Beta).astype(float))/posNorm+y*np.cos((psi-Beta).astype(float))/posNorm
H34=(-x*Vc*np.cos((psi-Beta).astype(float))+y*Vc*np.sin((psi-Beta).astype(float)))/posNorm
Hk=np.array([[1,0,0,0],[x/posNorm,y/posNorm,0,0],[H31,H32,H33,H34]])
self.CurrentRdrTracks.tracks[idx].H=Mat_buildROS(Hk)
Pk=Mat_extractROS(self.CurrentRdrTracks.tracks[idx].P)
K=np.dot(np.dot(Pk,Hk.T),np.linalg.inv((np.dot(np.dot(Hk,Pk),Hk.T)+self.R_rdr).astype(float)))
self.CurrentRdrTracks.tracks[idx].K=Mat_buildROS(K)
StateVec=np.array([x, y,Vc,Beta]).T
if Method=='Greedy':
rho=np.sqrt(Yk[idx].pose.position.x**2+Yk[idx].pose.position.y**2)
rhoDot=(Yk[idx].pose.position.x*np.sin((psi-Beta).astype(float))*Vc+Yk[idx].pose.position.y*np.cos((psi-Beta).astype(float))*Vc)/rho
YkdataAssocStateVec=np.array([Yk[idx].pose.position.x,rho,rhoDot]).T
StateVec=StateVec.reshape([4,1])
YkdataAssocStateVec=YkdataAssocStateVec.reshape([3,1])
StateVec=StateVec+np.matmul(K,(YkdataAssocStateVec-np.matmul(Hk,StateVec)))
StateVec=StateVec.flatten()
else: # If using JPDA
StateVec=StateVec+np.matmul(K,Yk[idx])
StateVec=StateVec.flatten()
self.CurrentRdrTracks.tracks[idx].x.data=StateVec[0]
self.CurrentRdrTracks.tracks[idx].y.data=StateVec[1]
self.CurrentRdrTracks.tracks[idx].Vc.data=StateVec[2]
self.CurrentRdrTracks.tracks[idx].B.data=StateVec[3]
Pk=np.dot((np.eye(4)-np.dot(K,Hk)),Pk)
self.CurrentRdrTracks.tracks[idx].P=Mat_buildROS(Pk)
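# Hedged aside (illustration only, not part of the tracker): the block above is
# the standard EKF correction step. With assumed placeholder matrices the
# shapes of the 4-state / 3-measurement radar model work out as:
#   Pk = np.eye(4); Hk = np.eye(3, 4); R = 0.1*np.eye(3)    # assumptions
#   K  = Pk @ Hk.T @ np.linalg.inv(Hk @ Pk @ Hk.T + R)      # 4x3 Kalman gain
#   x  = x + K @ (y - Hk @ x)                               # state update
#   Pk = (np.eye(4) - K @ Hk) @ Pk                          # covariance update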
def ValidationGate(self,SensorData,track):
SensorIdxOut=[]
if isinstance(SensorData[0],CamObj): #
StateVec=np.array([track.yPx.data,track.zPx.data,track.width.data,track.height.data,track.VyPx.data,\
track.VzPx.data,track.widthDot.data,track.heightDot.data])
Hk=self.CamMsrtmMatrixH
y_est=np.dot(Hk.reshape(4,8),StateVec.reshape(8,1))
Pk=Mat_extractROS(track.P)
SkInv=np.linalg.inv(np.dot(np.dot(Hk,Pk),Hk.T)+self.R_cam)
for jdx in range(len(SensorData)):
y=np.array([(SensorData[jdx].xmax+SensorData[jdx].xmin)/2,\
(SensorData[jdx].ymax+SensorData[jdx].ymin)/2,\
(SensorData[jdx].xmax-SensorData[jdx].xmin),\
(SensorData[jdx].ymax-SensorData[jdx].ymin)])
Temp=((y.reshape(4,1)-y_est).T.dot(SkInv)).dot(y.reshape(4,1)-y_est)
# print('GateVal')
# print(Temp)
# print(jdx)
if (Temp[0]<=self.GateThreshCam**2):
SensorIdxOut.append(jdx)
elif isinstance(SensorData[0],RadarObj) or isinstance(SensorData[0],RadarObjMKZ):
StateVec=np.array([track.x.data, track.y.data,track.Vc.data,track.B.data])
Hk=Mat_extractROS(track.H)
psi=np.array([self.psi])
y_est=np.dot(Hk.reshape(3,4),StateVec.reshape(4,1))
Pk=Mat_extractROS(track.P)
SkInv=np.linalg.inv((np.dot(np.dot(Hk,Pk),Hk.T)+self.R_rdr).astype(float))
for jdx in range(len(SensorData)):
Vc=np.sqrt((self.Vt+SensorData[jdx].vx)**2+SensorData[jdx].vy**2)
if SensorData[jdx].vy==0.0:
Beta=0
else:
Beta=SensorData[jdx].vx/SensorData[jdx].vy # This will be Vx/Vy for the Delphi ESR
rho=np.sqrt(SensorData[jdx].pose.position.x**2+SensorData[jdx].pose.position.y**2)
rhoDot=(SensorData[jdx].pose.position.x*np.sin((psi-Beta).astype(float))*Vc+SensorData[jdx].pose.position.y*np.cos((psi-Beta).astype(float))*Vc)/rho
y=np.array([SensorData[jdx].pose.position.x,rho,rhoDot])
Temp=((y.reshape(3,1)-y_est).T.dot(SkInv)).dot(y.reshape(3,1)-y_est)
if (Temp[0]<=self.GateThreshRdr**2):
SensorIdxOut.append(jdx)
return SensorIdxOut # returns a python list, not numpy array
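# Hedged aside: ValidationGate above is the usual ellipsoidal (Mahalanobis)
# gate. For each candidate measurement y it computes
#   d2 = (y - y_est).T @ SkInv @ (y - y_est)   # squared innovation distance
# and keeps the measurement when d2 <= GateThresh**2, where SkInv is the
# inverse innovation covariance inv(Hk @ Pk @ Hk.T + R).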
def BBoxBuilder(self,data):
self.BBoxStore=data
def CamMsrmts(self,DataIn):
# if DataIn.count>0:
data=self.BBoxStore
# data.header=DataIn.header
self.CamReadings=[]
for idx in range(len(data.bounding_boxes)):
if (data.bounding_boxes[idx].id in self.YoloClassList) and (data.bounding_boxes[idx].probability>0.3): # Only add if confident of detection
# if (data.bounding_boxes[idx].probability>0.3):
self.CamReadings=np.append(self.CamReadings,CamObj())
self.CamReadings[-1].header=data.header
self.CamReadings[-1].xmin=data.bounding_boxes[idx].xmin
self.CamReadings[-1].xmax=data.bounding_boxes[idx].xmax
self.CamReadings[-1].ymin=data.bounding_boxes[idx].ymin
self.CamReadings[-1].ymax=data.bounding_boxes[idx].ymax
self.CamReadings[-1].id=data.bounding_boxes[idx].id
self.CamReadings[-1].confidence=data.bounding_boxes[idx].probability
self.CamReadings=np.asarray(self.CamReadings)
#TODO: Change State Vec to just position, no widths/width rates
self.CamRawBBPlotter(self.CamReadings)
self.trackManager(self.CamReadings)
def CamRawBBPlotter(self,SensorData):
if not(hasattr(self,'image')) or (self.PlotArg=="1"):
return
LocalImage=self.image
for idx in range(len(SensorData)):
start=(int(SensorData[idx].xmin), int(SensorData[idx].ymin))
end= (int(SensorData[idx].xmax),int(SensorData[idx].ymax))
LocalImage=cv2.rectangle(LocalImage,start,end,(0,0,255),2)
self.image=LocalImage
def RdrMsrmtsNuSc(self,data):
#Build SensorData
self.RdrReadings=[]
for idx in range(len(data.objects)):
self.RdrReadings=np.append(self.RdrReadings,RadarObj())
self.RdrReadings[-1].pose=data.objects[idx].pose
self.RdrReadings[-1].vx=data.objects[idx].vx
self.RdrReadings[-1].vy=data.objects[idx].vy
self.RdrReadings[-1].vx_comp=data.objects[idx].vx_comp
self.RdrReadings[-1].vy_comp=data.objects[idx].vy_comp
self.RdrReadings[-1].header=data.header
self.RdrReadings=np.asarray(self.RdrReadings)
self.trackManager(self.RdrReadings)
def RdrMsrmtsMKZ(self,data):
#Build SensorData
# startTimemst=time.time()
self.RdrReadings=[]
for idx in range(len(data.objects)):
if (data.objects[idx].pose.pose.position.x==0.0) and (data.objects[idx].pose.pose.position.y==0.0) and (data.objects[idx].pose.covariance[0]==0.0):
continue #Zero entry, so skip it
self.RdrReadings=np.append(self.RdrReadings,RadarObjMKZ())
self.RdrReadings[-1].pose=data.objects[idx].pose.pose
self.RdrReadings[-1].vx=data.objects[idx].twist.twist.linear.x # Not used?
self.RdrReadings[-1].vy=data.objects[idx].twist.twist.linear.y # Not used?
self.RdrReadings[-1].vx_comp=self.velX+data.objects[idx].twist.twist.linear.x
self.RdrReadings[-1].vy_comp=self.velY+data.objects[idx].twist.twist.linear.y
self.RdrReadings[-1].header=data.objects[idx].header
self.RdrReadings[-1].id=data.objects[idx].id
self.RdrReadings=np.asarray(self.RdrReadings)
import sample_utils
import config
import parse_midas_data
import os.path
import pylab
import sys
import numpy
import diversity_utils
import gene_diversity_utils
import stats_utils
from math import log10,ceil
from numpy.random import randint
import core_gene_utils
import gzip
singleton_directory = '%ssingleton_rates/' % (parse_midas_data.data_directory)
intermediate_filename_template = '%s%s.txt.gz'
min_coverage = config.min_median_coverage
min_sample_size = 10
def load_singleton_rate_map(species_name):
# This definition is called whenever another script downstream uses the output of this data.
intermediate_filename = intermediate_filename_template % (singleton_directory, species_name)
singleton_rate_map = {}
if not os.path.isfile(intermediate_filename):
return singleton_rate_map
file = gzip.open(intermediate_filename,"r")
file.readline() # header
for line in file:
items = line.split(",")
if items[0].strip()!=species_name:
continue
sample_i = items[1].strip()
sample_j = items[2].strip()
type = items[3].strip()
num_singletons = float(items[4])
num_doubletons = float(items[5])
num_differences = float(items[6])
num_opportunities = float(items[7])
if type not in singleton_rate_map:
singleton_rate_map[type] = {}
if sample_i==sample_j:
num_singletons = 0
num_doubletons = 0
num_differences = 0
singleton_rate_map[type][sample_i, sample_j] = (num_singletons, num_doubletons, num_differences, num_opportunities)
return singleton_rate_map
def calculate_matrices_from_singleton_rate_map(singleton_rate_map, type, allowed_samples=[]):
# once the map is loaded, then we can compute rate matrices in this definition (so, it relies on the previous def)
sample_set = set([])
for sample_i, sample_j in singleton_rate_map[type].keys():
    sample_set.add(sample_i)
    sample_set.add(sample_j)
if len(allowed_samples)>0:
allowed_sample_set = set(allowed_samples)
else:
allowed_sample_set = sample_set
sample_set = set()
for sample_i, sample_j in singleton_rate_map[type]:
sample_set.add(sample_i)
sample_set.add(sample_j)
if len(allowed_samples)==0:
allowed_samples = list(sorted(allowed_sample_set))
samples = []
# preserve same order as allowed samples
for sample in allowed_samples:
if sample in sample_set:
samples.append(sample)
singleton_matrix = numpy.zeros((len(samples),len(samples)))*1.0
doubleton_matrix = numpy.zeros_like(singleton_matrix)
difference_matrix = numpy.zeros_like(singleton_matrix)
opportunity_matrix = numpy.zeros_like(singleton_matrix)
for i in xrange(0,len(samples)):
for j in xrange(0,len(samples)):
num_singletons, num_doubletons, num_differences, num_opportunities = singleton_rate_map[type][(samples[i], samples[j])]
if i==j:
num_doubletons = 0
singleton_matrix[i,j] = num_singletons
doubleton_matrix[i,j] = num_doubletons
difference_matrix[i,j] = num_differences
opportunity_matrix[i,j] = num_opportunities
#print singleton_matrix, opportunity_matrix
return samples, singleton_matrix, doubleton_matrix, difference_matrix, opportunity_matrix
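# Hedged usage sketch (the species name is a placeholder; '4D' matches the
# variant types computed in __main__ below):
#   m = load_singleton_rate_map('Bacteroides_vulgatus_57955')
#   samples, singletons, doubletons, diffs, opps = \
#       calculate_matrices_from_singleton_rate_map(m, '4D')
#   singleton_rates = singletons / (opps + (opps == 0))  # guard divide-by-zero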
def calculate_matrices_from_substitution_rate_map(substitution_rate_map, type, allowed_samples=[]):
# once the map is loaded, then we can compute rate matrices in this definition (so, it relies on the previous def)
samples, mut_difference_matrix, rev_difference_matrix, mut_opportunity_matrix, rev_opportunity_matrix = calculate_mutrev_matrices_from_substitution_rate_map( substitution_rate_map, type, allowed_samples)
difference_matrix = mut_difference_matrix+rev_difference_matrix
opportunity_matrix = mut_opportunity_matrix+rev_opportunity_matrix
return samples, difference_matrix, opportunity_matrix
if __name__=='__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--debug", help="Loads only a subset of SNPs for speed", action="store_true")
parser.add_argument("--chunk-size", type=int, help="max number of records to load", default=1000000000)
parser.add_argument("species", help="Name of specific species to run code on")
args = parser.parse_args()
debug = args.debug
chunk_size = args.chunk_size
species_name=args.species
good_species_list = [species_name]
os.system('mkdir -p %s' % singleton_directory)
# Load subject and sample metadata
sys.stderr.write("Loading sample metadata...\n")
subject_sample_map = sample_utils.parse_subject_sample_map()
sys.stderr.write("Done!\n")
# header for the output file.
record_strs = [", ".join(['Species', 'Sample1', 'Sample2', 'Type', 'Num_muts', 'Num_revs', 'Num_mut_opportunities', 'Num_rev_opportunities'])]
for species_name in good_species_list:
sys.stderr.write("Loading haploid samples...\n")
# Only plot samples above a certain depth threshold that are confidently phaseable.
snp_samples = diversity_utils.calculate_haploid_samples(species_name, debug=debug)
if len(snp_samples) < min_sample_size:
sys.stderr.write("Not enough haploid samples!\n")
continue
# Only consider one sample per person
snp_samples = snp_samples[sample_utils.calculate_unique_samples(subject_sample_map, sample_list=snp_samples)]
sys.stderr.write("Proceeding with %d haploid samples!\n" % len(snp_samples))
sys.stderr.write("Loading whitelisted genes...\n")
core_genes = core_gene_utils.parse_core_genes(species_name)
non_shared_genes = core_gene_utils.parse_non_shared_reference_genes(species_name)
shared_pangenome_genes = core_gene_utils.parse_shared_genes(species_name)
sys.stderr.write("Done! %d core genes and %d shared genes and %d non-shared genes\n" % (len(core_genes), len(shared_pangenome_genes), len(non_shared_genes)))
# Analyze SNPs, looping over chunk sizes.
# Clunky, but necessary to limit memory usage on cluster
# Load SNP information for species_name
sys.stderr.write("Loading SNPs for %s...\n" % species_name)
sys.stderr.write("(core genes only...)\n")
snp_doubleton_count_matrix = numpy.array([])
snp_difference_count_matrix = numpy.array([])
snp_singleton_count_matrix = numpy.array([])
snp_singleton_opportunity_matrix = numpy.array([])
syn_doubleton_count_matrix = numpy.array([])
syn_difference_count_matrix = numpy.array([])
syn_singleton_count_matrix = numpy.array([])
syn_singleton_opportunity_matrix = numpy.array([])
non_doubleton_count_matrix = numpy.array([])
non_difference_count_matrix = numpy.array([])
non_singleton_count_matrix = numpy.array([])
non_singleton_opportunity_matrix = numpy.array([])
core_doubleton_count_matrix = numpy.array([])
core_difference_count_matrix = numpy.array([])
core_singleton_count_matrix = numpy.array([])
core_singleton_opportunity_matrix = numpy.array([])
final_line_number = 0
while final_line_number >= 0:
sys.stderr.write("Loading chunk starting @ %d...\n" % final_line_number)
dummy_samples, allele_counts_map, passed_sites_map, final_line_number = parse_midas_data.parse_snps(species_name, debug=debug, allowed_samples=snp_samples, chunk_size=chunk_size,initial_line_number=final_line_number, allowed_genes=non_shared_genes)
sys.stderr.write("Done! Loaded %d genes\n" % len(allele_counts_map.keys()))
# Calculate fixation matrix
sys.stderr.write("Calculating matrix of singletons...\n")
# Synonymous (4D)
chunk_syn_doubleton_count_matrix, chunk_syn_singleton_count_matrix, chunk_syn_difference_count_matrix, chunk_syn_singleton_opportunity_matrix = diversity_utils.calculate_singleton_matrix(allele_counts_map, passed_sites_map, allowed_genes=core_genes, allowed_variant_types=set(['4D']))
# Nonsynonymous (1D)
chunk_non_doubleton_count_matrix, chunk_non_singleton_count_matrix, chunk_non_difference_count_matrix, chunk_non_singleton_opportunity_matrix = diversity_utils.calculate_singleton_matrix(allele_counts_map, passed_sites_map, allowed_genes=core_genes, allowed_variant_types=set(['1D']))
# Core (all)
chunk_core_doubleton_count_matrix, chunk_core_singleton_count_matrix, chunk_core_difference_count_matrix, chunk_core_singleton_opportunity_matrix = diversity_utils.calculate_singleton_matrix(allele_counts_map, passed_sites_map, allowed_genes=core_genes)
# All
chunk_snp_doubleton_count_matrix, chunk_snp_singleton_count_matrix, chunk_snp_difference_count_matrix, chunk_snp_singleton_opportunity_matrix = diversity_utils.calculate_singleton_matrix(allele_counts_map, passed_sites_map)
sys.stderr.write("Done!\n")
if len(snp_singleton_count_matrix)==0:
snp_doubleton_count_matrix = numpy.zeros_like(chunk_snp_singleton_count_matrix)*1.0
snp_difference_count_matrix = numpy.zeros_like(chunk_snp_singleton_count_matrix)*1.0
snp_singleton_count_matrix = numpy.zeros_like(chunk_snp_singleton_count_matrix)*1.0
snp_singleton_opportunity_matrix = numpy.zeros_like(chunk_snp_singleton_count_matrix)*1.0
syn_doubleton_count_matrix = numpy.zeros_like(chunk_snp_singleton_count_matrix)*1.0
syn_difference_count_matrix = numpy.zeros_like(chunk_snp_singleton_count_matrix)*1.0
syn_singleton_count_matrix = numpy.zeros_like(chunk_snp_singleton_count_matrix)*1.0
syn_singleton_opportunity_matrix = numpy.zeros_like(chunk_snp_singleton_count_matrix)*1.0
non_doubleton_count_matrix = numpy.zeros_like(chunk_snp_singleton_count_matrix)*1.0
non_difference_count_matrix = numpy.zeros_like(chunk_snp_singleton_count_matrix)*1.0
non_singleton_count_matrix = numpy.zeros_like(chunk_snp_singleton_count_matrix)*1.0
non_singleton_opportunity_matrix = numpy.zeros_like(chunk_snp_singleton_count_matrix)*1.0
core_doubleton_count_matrix = numpy.zeros_like(chunk_snp_singleton_count_matrix)*1.0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 5 23:47:57 2021
@author: guo.1648
"""
# final version.
# referenced from NN_getDist_testCode_forBiggan.py and
# NN_getRepThreshPairImg_testCode_forBiggan.py.
# This code does the following:
# (1) generate a 32x32 sample sheet from images in dir .../chenqi_random_samples/
# (2) do NN query as in NN_getDist_testCode_forBiggan.py
# (3) threshold the matched pairs as in NN_getRepThreshPairImg_testCode_forBiggan.py
# Note: for the MNIST (grayscale) dataset we still use 3 channels (each channel
# identical) to compute the L2 norm, so the distance thresholds need not change.
import cv2
import os
import re
import numpy as np
from shutil import copyfile
from sklearn.neighbors import NearestNeighbors
NNmatchDist_threshold_values = [10000, 9000, 8000, 7000]
dstFolder_thresh_list = ['NNmatchResult_threshold10000/','NNmatchResult_threshold9000/','NNmatchResult_threshold8000/','NNmatchResult_threshold7000/']
"""
#### for FLOWER_128_sub1000: 1000 images dataset
srcDir_sampleSheetImgs = '/scratch/BigGAN-PyTorch/samples/BigGAN_FLOWER_128_sub1000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs16_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/scratch/BigGAN-PyTorch/FLOWER_128_sub1000_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/FLOWER_128_sub1000/Itr38950/'
# parameters:
im_size = 128
batch_size = 16 # i.e., each sample sheet is of 4x4 !!!!:
num_row = 4
num_col = 4
"""
"""
#### for FLOWER_128_sub2000: 2000 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_FLOWER_128_sub2000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs32_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/FLOWER_128_sub2000_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/FLOWER_128_sub2000/Itr29700/'
# parameters:
im_size = 128
batch_size = 32 # i.e., the sample sheet is of 5x6+2 !!!!:
num_row = 7
num_col = 5
"""
"""
#### for FLOWER_128_sub4000: 4000 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_FLOWER_128_sub4000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs56_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/FLOWER_128_sub4000_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/FLOWER_128_sub4000/Itr10700/'
# parameters:
im_size = 128
batch_size = 56 # i.e., each sample sheet is of 8x7 !!!!:
num_row = 8
num_col = 7
"""
"""
#### for FLOWER_128_sub6000: 6000 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_FLOWER_128_sub6000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs24_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/FLOWER_128_sub6000_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/FLOWER_128_sub6000/Itr17300/'
# parameters:
im_size = 128
batch_size = 24 # i.e., each sample sheet is of 6x4 !!!!:
num_row = 6
num_col = 4
"""
"""
#### for FLOWER_128: 8189 images dataset (the original FLOWER dataset)
srcDir_sampleSheetImgs = '/scratch/BigGAN-PyTorch/samples/BigGAN_FLOWER_128_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs48_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/scratch/BigGAN-PyTorch/FLOWER_128_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/FLOWER_128/Itr21500/'
# parameters:
im_size = 128
batch_size = 48 # i.e., each sample sheet is of 8x6 !!!!:
num_row = 8
num_col = 6
"""
"""
#### for CelebA_128_sub200: 200 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_CelebA_128_sub200_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs32_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/CelebA_128_sub200_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/CelebA_128_sub200/Itr17850/'
# parameters:
im_size = 128
batch_size = 32 # i.e., the sample sheet is of 5x6+2 !!!!:
num_row = 7
num_col = 5
"""
"""
#### for CelebA_128_sub600: 600 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_CelebA_128_sub600_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs32_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/CelebA_128_sub600_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/CelebA_128_sub600/Itr20450/'
# parameters:
im_size = 128
batch_size = 32 # i.e., the sample sheet is of 5x6+2 !!!!:
num_row = 7
num_col = 5
"""
"""
#### for CelebA_128_sub1000: 1000 images dataset
srcDir_sampleSheetImgs = '/scratch/BigGAN-PyTorch/samples/BigGAN_CelebA_128_sub1000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs16_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/scratch/BigGAN-PyTorch/CelebA_128_sub1000_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/CelebA_128_sub1000/Itr37400/'
# parameters:
im_size = 128
batch_size = 16 # i.e., each sample sheet is of 4x4 !!!!:
num_row = 4
num_col = 4
"""
"""
#### for CelebA_128_sub4000: 4000 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_CelebA_128_sub4000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs32_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/CelebA_128_sub4000_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/CelebA_128_sub4000/Itr19600/'
# parameters:
im_size = 128
batch_size = 32 # i.e., the sample sheet is of 5x6+2 !!!!:
num_row = 7
num_col = 5
"""
"""
#### for CelebA_128_sub8000: 8000 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_CelebA_128_sub8000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs32_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/CelebA_128_sub8000_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/CelebA_128_sub8000/Itr23550/'
# parameters:
im_size = 128
batch_size = 32 # i.e., the sample sheet is of 5x6+2 !!!!:
num_row = 7
num_col = 5
"""
"""
#### for MNIST_128_sub10000: 10000 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_MNIST_128_sub10000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs16_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/MNIST_128_sub10000_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/MNIST_128_sub10000/Itr35600/'
# parameters:
im_size = 128
batch_size = 16 # i.e., each sample sheet is of 4x4 !!!!:
num_row = 4
num_col = 4
"""
"""
#### for MNIST_128_sub30000: 30000 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_MNIST_128_sub30000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs16_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/MNIST_128_sub30000_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/MNIST_128_sub30000/Itr37300/'
# parameters:
im_size = 128
batch_size = 16 # i.e., each sample sheet is of 4x4 !!!!:
num_row = 4
num_col = 4
"""
"""
#### for MNIST_128_train: 60000 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_MNIST_128_train_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs16_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/MNIST_128_train_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/MNIST_128_train/Itr35850/'
# parameters:
im_size = 128
batch_size = 16 # i.e., each sample sheet is of 4x4 !!!!:
num_row = 4
num_col = 4
"""
"""
#### for LSUN_128_sub200: 200 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_LSUN_128_sub200_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs48_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/LSUN_128_sub200_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/LSUN_128_sub200/Itr12000/'
# parameters:
im_size = 128
batch_size = 48 # i.e., each sample sheet is of 8x6 !!!!:
num_row = 8
num_col = 6
"""
"""
#### for LSUN_128_sub1000: 1000 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_LSUN_128_sub1000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs48_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/LSUN_128_sub1000_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/LSUN_128_sub1000/Itr13450/'
# parameters:
im_size = 128
batch_size = 48 # i.e., each sample sheet is of 8x6 !!!!:
num_row = 8
num_col = 6
"""
"""
#### for LSUN_128_sub5000: 5000 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_LSUN_128_sub5000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs48_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/LSUN_128_sub5000_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/LSUN_128_sub5000/Itr9650/'
# parameters:
im_size = 128
batch_size = 48 # i.e., each sample sheet is of 8x6 !!!!:
num_row = 8
num_col = 6
"""
"""
#### for LSUN_128_sub10000: 10000 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_LSUN_128_sub10000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs48_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/LSUN_128_sub10000_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/LSUN_128_sub10000/Itr12000/'
# parameters:
im_size = 128
batch_size = 48 # i.e., each sample sheet is of 8x6 !!!!:
num_row = 8
num_col = 6
"""
#"""
#### for LSUN_128_sub30000: 30000 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_LSUN_128_sub30000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs48_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/LSUN_128_sub30000_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/LSUN_128_sub30000/Itr10400/'
# parameters:
im_size = 128
batch_size = 48 # i.e., each sample sheet is of 8x6 !!!!:
num_row = 8
num_col = 6
#"""
# for (1) and (2):
dstRootDir_viewSampleSheetImgs = dstRootDir + 'view_sampleSheetImgs/'
dstRootDir_NNmatchResult = dstRootDir + 'NNmatchResult/'
dstImgName_sampleSheetAll = dstRootDir + 'fakes.png'
dstImgName_NNmatchSheet = dstRootDir + 'NNmatchResultSheet.png'
dstTxtName_matchDist = dstRootDir + 'NNmatchDist.txt'
# for (3):
dstTxtName_matchDistThresh = dstRootDir + 'NNmatchDist_smallerThanThresh.txt'
def dealWith_sampleSheets():
# the list to store each image in all the sampleSheet_imgs
sample_img_list = []
for (dirpath, dirnames, filenames) in os.walk(srcDir_sampleSheetImgs):
#print(filenames)
for filename in filenames:
#print(filename)
if ".jpg" in filename:
print("------------------deal with---------------------")
print(filename)
fullImgName = srcDir_sampleSheetImgs + filename
sampleSheet_img = cv2.imread(fullImgName)
(sheet_img_height, sheet_img_width, ch) = sampleSheet_img.shape
single_img_height = sheet_img_height//num_row # 130
single_img_width = sheet_img_width//num_col # 130
# split the sampleSheet img into batch_size (here 16) images:
tmp_count = 1
for i in range(num_row):
for j in range(num_col):
start_row_pos = i*single_img_height
end_row_pos = (i+1)*single_img_height
start_col_pos = j*single_img_width
end_col_pos = (j+1)*single_img_width
single_sample_img = sampleSheet_img[start_row_pos:end_row_pos,start_col_pos:end_col_pos,:]
if tmp_count <= batch_size:
sample_img_list.append(single_sample_img)
tmp_count += 1
if len(sample_img_list) > 1024:
sample_img_list = sample_img_list[:1024] # only keep the first 1024 imgs
return sample_img_list
def generateSave_sampleSheetAll(sample_img_list):
# generate and save the 32x32 sample sheet from sample_img_list
(single_img_height, single_img_width, ch) = sample_img_list[0].shape
sample_sheet_all = np.zeros((single_img_height*32,single_img_width*32,ch),dtype=np.uint8)
for i in range(32):
for j in range(32):
start_row_pos = i*single_img_height
end_row_pos = (i+1)*single_img_height
start_col_pos = j*single_img_width
end_col_pos = (j+1)*single_img_width
match_img_idx = i*32 + j # row-major index into the 32x32 grid
if match_img_idx < 1024:
sample_sheet_all[start_row_pos:end_row_pos,start_col_pos:end_col_pos,:] = sample_img_list[match_img_idx]
# save this sheet
cv2.imwrite(dstImgName_sampleSheetAll, sample_sheet_all)
return
def image_to_feature_vector(image):
# Note: the image is already resized to a fixed size.
# flatten the image into a list of raw pixel intensities:
return image.flatten()
def my_center_crop(origin_img, crop_size):
y,x,_ = origin_img.shape
startx = x//2-(crop_size//2)
starty = y//2-(crop_size//2)
origin_img_centCrop = origin_img[starty:starty+crop_size,startx:startx+crop_size]
return origin_img_centCrop
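# Hedged example of the preprocessing used below (the path is a placeholder):
#   img = cv2.imread('example.jpg')
#   crop = my_center_crop(img, min(img.shape[0], img.shape[1]))
#   vec = image_to_feature_vector(cv2.resize(crop, (im_size, im_size)))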
def generateTrainSet(len_featVec, dim):
all_origin_img_vecs = [] # this is our feature space
all_origin_img_names = []
# newly modified: different from that in FLOWER_128_sub1000 and FLOWER_128:
images_arr = np.load(srcRootDir_imgNpz)
#images_arr.files
images_list = list(images_arr['imgs'][:,0])
for filename in images_list:
#print("------------------deal with---------------------")
#print(filename)
#origin_img = cv2.imread(srcRootDir_originDataImg+filename)
origin_img = cv2.imread(filename)
origin_img_centCrop = my_center_crop(origin_img, min(origin_img.shape[0],origin_img.shape[1]))
# resize using linear interpolation:
origin_img_centCrop_resize = cv2.resize(origin_img_centCrop, dim)
# also convert it to feature vector:
origin_img_centCrop_resize_vec = image_to_feature_vector(origin_img_centCrop_resize)
assert(len(origin_img_centCrop_resize_vec)==len_featVec)
all_origin_img_vecs.append(origin_img_centCrop_resize_vec)
all_origin_img_names.append(filename)
return (np.array(all_origin_img_vecs), all_origin_img_names)
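# Hedged sketch of the NN query these helpers feed (the query itself is
# truncated in this listing; NearestNeighbors is imported at the top):
#   feats, names = generateTrainSet(im_size*im_size*3, (im_size, im_size))
#   nn = NearestNeighbors(n_neighbors=1, metric='euclidean').fit(feats)
#   dists, idxs = nn.kneighbors(sample_img_vecs)  # sample_img_vecs assumed:
#                                                 # flattened generated samples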
def combine_matchingResult(match_img_list):
# combine the match_img together into a corresponding sheet
(single_img_height, single_img_width, ch) = match_img_list[0].shape
match_img_sheet = np.zeros((single_img_height*32,single_img_width*32,ch),dtype=np.uint8)
################################################################################
#
# Copyright (c) 2017 University of Oxford
# Authors:
# <NAME> (<EMAIL>)
#
# This work is licensed under the Creative Commons
# Attribution-NonCommercial-ShareAlike 4.0 International License.
# To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc-sa/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
#
################################################################################
import os
import re
import numpy as np
from transform import build_se3_transform
from interpolate_poses import interpolate_vo_poses, interpolate_ins_poses
from velodyne import load_velodyne_raw, load_velodyne_binary, velodyne_raw_to_pointcloud
def build_pointcloud(lidar_dir, poses_file, extrinsics_dir, start_time, end_time, origin_time=-1):
"""Builds a pointcloud by combining multiple LIDAR scans with odometry information.
Args:
lidar_dir (str): Directory containing LIDAR scans.
poses_file (str): Path to a file containing pose information. Can be VO or INS data.
extrinsics_dir (str): Directory containing extrinsic calibrations.
start_time (int): UNIX timestamp of the start of the window over which to build the pointcloud.
end_time (int): UNIX timestamp of the end of the window over which to build the pointcloud.
origin_time (int): UNIX timestamp of origin frame. Pointcloud coordinates are relative to this frame.
Returns:
numpy.ndarray: 3xn array of (x, y, z) coordinates of pointcloud
numpy.array: array of n reflectance values or None if no reflectance values are recorded (LDMRS)
Raises:
ValueError: if specified window doesn't contain any laser scans.
IOError: if scan files are not found.
"""
if origin_time < 0:
origin_time = start_time
lidar = re.search('(lms_front|lms_rear|ldmrs|velodyne_left|velodyne_right)', lidar_dir).group(0)
timestamps_path = os.path.join(lidar_dir, os.pardir, lidar + '.timestamps')
timestamps = []
with open(timestamps_path) as timestamps_file:
for line in timestamps_file:
timestamp = int(line.split(' ')[0])
if start_time <= timestamp <= end_time:
timestamps.append(timestamp)
if len(timestamps) == 0:
raise ValueError("No LIDAR data in the given time bracket.")
with open(os.path.join(extrinsics_dir, lidar + '.txt')) as extrinsics_file:
extrinsics = next(extrinsics_file)
G_posesource_laser = build_se3_transform([float(x) for x in extrinsics.split(' ')])
poses_type = re.search(r'(vo|ins|rtk)\.csv', poses_file).group(1)
if poses_type in ['ins', 'rtk']:
with open(os.path.join(extrinsics_dir, 'ins.txt')) as extrinsics_file:
extrinsics = next(extrinsics_file)
G_posesource_laser = np.linalg.solve(build_se3_transform([float(x) for x in extrinsics.split(' ')]),
G_posesource_laser)
poses = interpolate_ins_poses(poses_file, timestamps, origin_time, use_rtk=(poses_type == 'rtk'))
else:
# sensor is VO, which is located at the main vehicle frame
poses = interpolate_vo_poses(poses_file, timestamps, origin_time)
pointcloud = np.array([[0], [0], [0], [0]])
if lidar == 'ldmrs':
reflectance = None
else:
reflectance = np.empty((0))
for i in range(0, len(poses)):
scan_path = os.path.join(lidar_dir, str(timestamps[i]) + '.bin')
if "velodyne" not in lidar:
if not os.path.isfile(scan_path):
continue
scan_file = open(scan_path)
scan = np.fromfile(scan_file, np.double)
scan_file.close()
scan = scan.reshape((len(scan) // 3, 3)).transpose()
if lidar != 'ldmrs':
# LMS scans are tuples of (x, y, reflectance)
reflectance = np.concatenate((reflectance, np.ravel(scan[2, :])))
scan[2, :] = np.zeros((1, scan.shape[1]))
else:
if os.path.isfile(scan_path):
ptcld = load_velodyne_binary(scan_path)
else:
scan_path = os.path.join(lidar_dir, str(timestamps[i]) + '.png')
if not os.path.isfile(scan_path):
continue
ranges, intensities, angles, approximate_timestamps = load_velodyne_raw(scan_path)
ptcld = velodyne_raw_to_pointcloud(ranges, intensities, angles)
reflectance = np.concatenate((reflectance, ptcld[3]))
scan = ptcld[:3]
scan = np.dot(np.dot(poses[i], G_posesource_laser), np.vstack([scan, np.ones((1, scan.shape[1]))]))
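# Plausible completion of the truncated loop body (hedged assumption, not in
# the source): accumulate the transformed homogeneous points, then drop the
# (0,0,0,0) seed column before returning:
#       pointcloud = np.hstack([pointcloud, scan])
#   pointcloud = pointcloud[:, 1:]
#   return pointcloud, reflectance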
from __future__ import absolute_import
import unittest
from sklearn.datasets import load_iris as load_data
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.cluster import KMeans
import numpy as np
import matplotlib.pyplot as plt
from scikitplot.metrics import plot_confusion_matrix
from scikitplot.metrics import plot_roc_curve
from scikitplot.metrics import plot_roc
from scikitplot.metrics import plot_ks_statistic
from scikitplot.metrics import plot_precision_recall_curve
from scikitplot.metrics import plot_precision_recall
from scikitplot.metrics import plot_silhouette
from scikitplot.metrics import plot_calibration_curve
from scikitplot.metrics import plot_cumulative_gain
from scikitplot.metrics import plot_lift_curve
def convert_labels_into_string(y_true):
return ["A" if x == 0 else x for x in y_true]
class TestPlotConfusionMatrix(unittest.TestCase):
def setUp(self):
np.random.seed(0)
self.X, self.y = load_data(return_X_y=True)
p = np.random.permutation(len(self.X))
self.X, self.y = self.X[p], self.y[p]
def tearDown(self):
plt.close("all")
def test_string_classes(self):
np.random.seed(0)
clf = LogisticRegression()
clf.fit(self.X, convert_labels_into_string(self.y))
preds = clf.predict(self.X)
plot_confusion_matrix(convert_labels_into_string(self.y), preds)
def test_normalize(self):
np.random.seed(0)
clf = LogisticRegression()
clf.fit(self.X, self.y)
preds = clf.predict(self.X)
plot_confusion_matrix(self.y, preds, normalize=True)
plot_confusion_matrix(self.y, preds, normalize=False)
def test_labels(self):
np.random.seed(0)
clf = LogisticRegression()
clf.fit(self.X, self.y)
preds = clf.predict(self.X)
plot_confusion_matrix(self.y, preds, labels=[0, 1, 2])
def test_hide_counts(self):
np.random.seed(0)
clf = LogisticRegression()
clf.fit(self.X, self.y)
preds = clf.predict(self.X)
plot_confusion_matrix(self.y, preds, hide_counts=True)
def test_true_pred_labels(self):
np.random.seed(0)
clf = LogisticRegression()
clf.fit(self.X, self.y)
preds = clf.predict(self.X)
true_labels = [0, 1]
pred_labels = [0, 2]
plot_confusion_matrix(self.y, preds,
true_labels=true_labels,
pred_labels=pred_labels)
def test_cmap(self):
np.random.seed(0)
clf = LogisticRegression()
clf.fit(self.X, self.y)
preds = clf.predict(self.X)
plot_confusion_matrix(self.y, preds, cmap='nipy_spectral')
plot_confusion_matrix(self.y, preds, cmap=plt.cm.nipy_spectral)
def test_ax(self):
np.random.seed(0)
clf = LogisticRegression()
clf.fit(self.X, self.y)
preds = clf.predict(self.X)
fig, ax = plt.subplots(1, 1)
out_ax = plot_confusion_matrix(self.y, preds)
assert ax is not out_ax
out_ax = plot_confusion_matrix(self.y, preds, ax=ax)
assert ax is out_ax
def test_array_like(self):
plot_confusion_matrix([0, 'a'], ['a', 0])
plot_confusion_matrix([0, 1], [1, 0])
plot_confusion_matrix(['b', 'a'], ['a', 'b'])
class TestPlotROCCurve(unittest.TestCase):
def setUp(self):
np.random.seed(0)
self.X, self.y = load_data(return_X_y=True)
p = np.random.permutation(len(self.X))
self.X, self.y = self.X[p], self.y[p]
def tearDown(self):
plt.close("all")
def test_string_classes(self):
np.random.seed(0)
clf = LogisticRegression()
clf.fit(self.X, convert_labels_into_string(self.y))
probas = clf.predict_proba(self.X)
plot_roc_curve(convert_labels_into_string(self.y), probas)
def test_ax(self):
np.random.seed(0)
clf = LogisticRegression()
clf.fit(self.X, self.y)
probas = clf.predict_proba(self.X)
fig, ax = plt.subplots(1, 1)
out_ax = plot_roc_curve(self.y, probas)
assert ax is not out_ax
out_ax = plot_roc_curve(self.y, probas, ax=ax)
assert ax is out_ax
def test_cmap(self):
np.random.seed(0)
clf = LogisticRegression()
clf.fit(self.X, self.y)
probas = clf.predict_proba(self.X)
plot_roc_curve(self.y, probas, cmap='nipy_spectral')
plot_roc_curve(self.y, probas, cmap=plt.cm.nipy_spectral)
def test_curve_diffs(self):
np.random.seed(0)
clf = LogisticRegression()
clf.fit(self.X, self.y)
probas = clf.predict_proba(self.X)
ax_macro = plot_roc_curve(self.y, probas, curves='macro')
ax_micro = plot_roc_curve(self.y, probas, curves='micro')
ax_class = plot_roc_curve(self.y, probas, curves='each_class')
self.assertNotEqual(ax_macro, ax_micro)
self.assertNotEqual(ax_micro, ax_class)
def test_invalid_curve_arg(self):
np.random.seed(0)
clf = LogisticRegression()
clf.fit(self.X, self.y)
probas = clf.predict_proba(self.X)
self.assertRaises(ValueError, plot_roc_curve, self.y, probas,
curves='zzz')
def test_array_like(self):
plot_roc_curve([0, 'a'], [[0.8, 0.2], [0.2, 0.8]])
plot_roc_curve([0, 1], [[0.8, 0.2], [0.2, 0.8]])
plot_roc_curve(['b', 'a'], [[0.8, 0.2], [0.2, 0.8]])
class TestPlotROC(unittest.TestCase):
def setUp(self):
np.random.seed(0)
self.X, self.y = load_data(return_X_y=True)
p = np.random.permutation(len(self.X))
self.X, self.y = self.X[p], self.y[p]
def tearDown(self):
plt.close("all")
def test_string_classes(self):
np.random.seed(0)
clf = LogisticRegression()
clf.fit(self.X, convert_labels_into_string(self.y))
probas = clf.predict_proba(self.X)
plot_roc(convert_labels_into_string(self.y), probas)
def test_ax(self):
np.random.seed(0)
clf = LogisticRegression()
clf.fit(self.X, self.y)
probas = clf.predict_proba(self.X)
fig, ax = plt.subplots(1, 1)
out_ax = plot_roc(self.y, probas)
assert ax is not out_ax
out_ax = plot_roc(self.y, probas, ax=ax)
assert ax is out_ax
def test_cmap(self):
np.random.seed(0)
clf = LogisticRegression()
clf.fit(self.X, self.y)
probas = clf.predict_proba(self.X)
plot_roc(self.y, probas, cmap='nipy_spectral')
plot_roc(self.y, probas, cmap=plt.cm.nipy_spectral)
def test_plot_micro(self):
np.random.seed(0)
clf = LogisticRegression()
clf.fit(self.X, self.y)
probas = clf.predict_proba(self.X)
plot_roc(self.y, probas, plot_micro=False)
plot_roc(self.y, probas, plot_micro=True)
def test_plot_macro(self):
np.random.seed(0)
clf = LogisticRegression()
clf.fit(self.X, self.y)
probas = clf.predict_proba(self.X)
plot_roc(self.y, probas, plot_macro=False)
plot_roc(self.y, probas, plot_macro=True)
def test_classes_to_plot(self):
np.random.seed(0)
clf = LogisticRegression()
clf.fit(self.X, self.y)
probas = clf.predict_proba(self.X)
plot_roc(self.y, probas, classes_to_plot=[0, 1])
plot_roc(self.y, probas, classes_to_plot=np.array([0, 1]))
def test_array_like(self):
plot_roc([0, 'a'], [[0.8, 0.2], [0.2, 0.8]])
plot_roc([0, 1], [[0.8, 0.2], [0.2, 0.8]])
plot_roc(['b', 'a'], [[0.8, 0.2], [0.2, 0.8]])
class TestPlotKSStatistic(unittest.TestCase):
def setUp(self):
np.random.seed(0)
self.X, self.y = load_breast_cancer(return_X_y=True)
p = np.random.permutation(len(self.X))
self.X, self.y = self.X[p], self.y[p]
def tearDown(self):
plt.close("all")
def test_string_classes(self):
np.random.seed(0)
clf = LogisticRegression()
clf.fit(self.X, convert_labels_into_string(self.y))
probas = clf.predict_proba(self.X)
plot_ks_statistic(convert_labels_into_string(self.y), probas)
def test_two_classes(self):
np.random.seed(0)
# Test this one on Iris (3 classes)
X, y = load_data(return_X_y=True)
clf = LogisticRegression()
clf.fit(X, y)
probas = clf.predict_proba(X)
self.assertRaises(ValueError, plot_ks_statistic, y, probas)
def test_ax(self):
np.random.seed(0)
clf = LogisticRegression()
clf.fit(self.X, self.y)
probas = clf.predict_proba(self.X)
fig, ax = plt.subplots(1, 1)
out_ax = plot_ks_statistic(self.y, probas)
assert ax is not out_ax
out_ax = plot_ks_statistic(self.y, probas, ax=ax)
assert ax is out_ax
def test_array_like(self):
plot_ks_statistic([0, 1], [[0.8, 0.2], [0.2, 0.8]])
plot_ks_statistic([0, 'a'], [[0.8, 0.2], [0.2, 0.8]])
plot_ks_statistic(['b', 'a'], [[0.8, 0.2], [0.2, 0.8]])
class TestPlotPrecisionRecallCurve(unittest.TestCase):
def setUp(self):
np.random.seed(0)
import torch.utils.data as data
from PIL import Image
import torch
import numpy as np
import h5py
import json
import pdb
import random
from misc.utils import repackage_hidden, clip_gradient, adjust_learning_rate, decode_txt
from misc.readers import ImageFeaturesHdfReader
from torch.nn.functional import normalize
class train(data.Dataset): # torch wrapper
def __init__(self, input_img_h5, input_imgid, input_ques_h5, input_json, negative_sample, num_val, data_split):
print(('DataLoader loading: %s' % data_split))
print(('Loading image feature from %s' % input_img_h5))
if data_split == 'test':
split = 'val'
else:
split = 'train' # train and val split both corresponding to 'train'
f = json.load(open(input_json, 'r'))
self.itow = f['itow']
self.wtoi = f['wtoi']
self.img_info = f['img_' + split]
# get the data split.
total_num = len(self.img_info)
if data_split == 'train':
s = 0
# e = int((total_num) * 1)
e = int((total_num - num_val) * 1)
# e = 1000
elif data_split == 'val':
s = total_num - num_val
e = total_num
else:
s = 0
e = total_num
self.img_info = self.img_info[s:e]
print(('%s number of data: %d' % (data_split, e - s)))
self.hdf_reader = ImageFeaturesHdfReader(
input_img_h5, False)
self.imgid = json.load(open(input_imgid, 'r'))['imgid'][s:e]
print(('Loading txt from %s' % input_ques_h5))
f = h5py.File(input_ques_h5, 'r')
self.ques = f['ques_' + split][s:e]
self.ans = f['ans_' + split][s:e]
self.cap = f['cap_' + split][s:e]
self.ques_len = f['ques_len_' + split][s:e]
self.ans_len = f['ans_len_' + split][s:e]
self.cap_len = f['cap_len_' + split][s:e]
self.ans_ids = f['ans_index_' + split][s:e]
self.opt_ids = f['opt_' + split][s:e]
self.opt_list = f['opt_list_' + split][:]
self.opt_len = f['opt_len_' + split][:]
f.close()
self.ques_length = self.ques.shape[2]
self.ans_length = self.ans.shape[2]
self.his_length = self.ques_length + self.ans_length
self.vocab_size = len(self.itow)
print(('Vocab Size: %d' % self.vocab_size))
self.split = split
self.rnd = 10
self.negative_sample = negative_sample
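# Hedged usage sketch (all file paths are placeholders):
#   dset = train('img.h5', 'imgid.json', 'ques.h5', 'params.json',
#                negative_sample=20, num_val=1000, data_split='train')
#   loader = torch.utils.data.DataLoader(dset, batch_size=32, shuffle=True)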
def __getitem__(self, index):
# get the image
img_id = self.img_info[index]['imgId']
img = self.hdf_reader[img_id]
img = torch.from_numpy(img)
img = normalize(img, dim=0, p=2)
# get the history
his = np.zeros((self.rnd, self.his_length))
# --------------------------------------------------------
# TAFSSL
# Copyright (c) 2019 IBM Corp
# Licensed under The Apache-2.0 License [see LICENSE for details]
# --------------------------------------------------------
import numpy as np
from utils.proto_semi import ProtoSemi
import time
import pickle
from utils.misc import print_params
from utils.misc import load_features
from utils.misc import print_msg
from utils.misc import avg, ci_95, parse_args
from utils.misc import get_color
from utils.misc import calc_acc
from utils.misc import get_features
import torch
import random
class ProtoSemiNoise(ProtoSemi):
def __init__(self, opt):
super().__init__(opt)
self.n_distract = None
def parse_feature(self, x):
z_support = x[:self.n_support * self.n_way]
z_semi = x[self.n_support * self.n_way:self.n_support * self.n_way + self.n_semi * (self.n_way + self.n_distract)]
z_query = x[self.n_support * self.n_way + self.n_semi * (self.n_way + self.n_distract):]
return z_support, z_semi, z_query
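# e.g. with n_way=5, n_support=5, n_semi=5, n_distract=5: rows 0-24 of x are
# support, rows 25-74 are semi-supervised (distractor classes included),
# and the remaining rows are query.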
def get_batch(self, x):
z_support, z_semi, z_query = self.parse_feature(x) # (n_way, n_support, n_feat), (n_way, n_query, n_feat)
z_support = z_support.contiguous().view(self.n_way * self.n_support, -1)
z_semi = z_semi.contiguous().view((self.n_way + self.n_distract) * self.n_semi, -1)
z_query = z_query.contiguous().view(self.n_way * self.n_query, -1)
return z_support, z_semi, z_query
def run_exp(params, verbose):
print_params(params)
n_episodes = 10000
episode_params = dict(n_way=params.n_way, n_support=params.n_shot,
n_query=params.n_query, n_semi=params.n_semi, n_distract=params.n_distract)
model = ProtoSemiNoise(opt=params)
model = model.cuda()
train_mean, cl_data_file = load_features(params)
acc_list = []
start_time = time.perf_counter()
for i in range(1, n_episodes + 1):
acc = run_episode(train_mean, cl_data_file, model, **episode_params)
acc_list += acc
if i % verbose == 0:
print_msg(i, n_episodes, start_time, acc_list, acc)
res = [avg(acc_list[ind::len(acc)]) for ind in range(len(acc))]
ci = [ci_95(acc_list[ind::len(acc)]) for ind in range(len(acc))]
return res, ci
def create_episode(cl_data_file, n_way=5, n_support=5, n_query=15, n_semi=5, n_distract=5):
class_list = list(cl_data_file.keys())  # list() so random.sample works on Python 3
select_class = random.sample(class_list, n_way + n_distract) # List with the class idx
z_all = []
for cl in select_class[:n_way]:
img_feat = cl_data_file[cl]
perm_ids = list(np.random.permutation(len(img_feat)).tolist())
z_all.append([np.squeeze(img_feat[perm_ids[ii]]) for ii in range(n_support + n_semi + n_query)]) # stack each batch
z_all = np.array(z_all) # (num ways, n_support + n_semi + n_query, n_feat)
z_support = z_all[:, :n_support, :]
z_support = z_support.reshape((n_way * n_support, -1))
z_semi = z_all[:, n_support:n_support + n_semi, :]
z_semi = z_semi.reshape((n_way * n_semi, -1))
np.random.shuffle(z_semi)
z_query = z_all[:, n_support + n_semi:, :]
z_query = z_query.reshape((n_way * n_query, -1))
y = np.repeat(range(n_way), n_query)
if n_distract > 0:
z_noise = []
for cl in select_class[n_way:]:
img_feat = cl_data_file[cl]
perm_ids = list(np.random.permutation(len(img_feat)).tolist())
z_noise.append([np.squeeze(img_feat[perm_ids[ii]]) for ii in range(n_semi)]) # stack each batch
z_noise = np.array(z_noise) # (n_distract, n_query, n_feat)
z_noise = z_noise.reshape((n_distract * n_semi, -1))
z_semi = np.concatenate((z_semi, z_noise))
"""
Main routine used to calculate the sphere-overburden response using the semi-analytical solution described in Desmarais and Smith, 2016. Geophysics 81(4), P. E265-E277
This script is called by sphereexe when the "plot response" button is clicked in the GUI
"""
#Imports
import numpy as np
import math
import quadpy
import matplotlib.pyplot as plt
import cProfile
import pandas as pd
class sphereresponse(object):
def __init__(self):
self.mu = 1.256637e-6 # permeability of free space
self.dipole_m = 1847300 # dipole moment of tx
self.rtx = np.array([0, 0, 120],dtype=np.int64) # Tx co-ordinates in array
self.radar = self.rtx[2] # height of transmitter above ground surface
self.offset_tx_rx = np.array([125, 0, 56],dtype=np.int64) # offset from transmitter to receiver array
self.rrx = np.array([self.rtx[0] - self.offset_tx_rx[0], self.rtx[1] - self.offset_tx_rx[1],
self.rtx[2] - self.offset_tx_rx[2]],dtype=np.int64) # receiver co-ordinate array
self.rsp = np.array([0, 0, -100],dtype=np.int64) # sphere position array
self.a = 60.0 # sphere radius
self.sigma_sp = 0.5 # sphere conductivity
self.mtx = np.array([0, 0, 1],dtype=np.int64) # unit vector of transmitter dipole moment
self.interval = 101
self.prof_start = -1000
self.prof_end = 1000
self.profile_length = (self.prof_end - self.prof_start)
self.delta_x = math.floor((self.profile_length / (self.interval-1))) # Number of times field is calculated along profile
self.profile = np.zeros((1, self.interval)) # profile position vector
self.profile_rrx = np.zeros((1, self.interval))
# Default window centers
self.wc = np.array([0.0417,
0.04194,
0.04229,
0.04268,
0.04312,
0.04365,
0.04434,
0.04517,
0.04609,
0.04722,
0.04863,
0.05034,
0.05239,
0.05488,
0.05786,
0.06147,
0.06582,
0.07104,
0.07739,
0.08506,
0.09429,
0.10540,
0.11890,
0.13510,
0.15470,
0.1827])
self.nw = len(self.wc) # Number of windows
self.P = 4.01 * 1E-3 # Pulse length
self.bfreq = 25 # Frequency of transmitter waveform
self.T = 1 / self.bfreq # Period
self.H_tot_x = np.zeros((self.nw, self.interval)) # Response vectors
self.H_tot_y = np.zeros((self.nw, self.interval))
self.H_tot_z = np.zeros((self.nw, self.interval))
self.C_x = np.zeros((self.nw, self.interval)) # Induced sphere moment vectors
self.C_z = np.zeros((self.nw, self.interval))
self.H_ob1 = np.zeros((self.nw, self.interval)) # Overburden response vectors
self.H_ob2 = np.zeros((self.nw, self.interval))
self.H_ob3 = np.zeros((self.nw, self.interval))
self.sigma_ob = 1 / 30 # Conductivity of overburden in S/m
self.thick_ob = 2 # Thickness of overburden in m
self.apply_dip = 0 # if 1 then apply dipping sphere model
self.strike = 90 # Strike of sphereS
self.dip = 135 # Dip of sphere
self.wave = 1
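# Hedged usage sketch: build the model with the defaults above, optionally
# override geometry/conductivity, then run the semi-analytic solution, which
# presumably fills the H_tot_* / H_ob* arrays defined in this __init__:
#   resp = sphereresponse()
#   resp.sigma_sp = 1.0   # S/m (example override)
#   resp.calculate()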
def calculate(self):
def dh_obdt_xyz(mtx, dipole_m, rtx, rrx, O, mu, sigma_ob, thick_ob):
"""
Function evaluates the time-derivative of the x,y,z component of the overburden field
see equations Eq A-5 from Desmarais and Smith, 2016. Geophysics 81(4), P. E265-E277
"""
m_x = dipole_m * mtx[0]
m_y = dipole_m * mtx[1]
m_z = dipole_m * mtx[2]
rtx_x = rtx[0]
rtx_y = rtx[1]
rtx_z = rtx[2]
rrx_x = rrx[0]
rrx_y = rrx[1]
rrx_z = rrx[2]
if rrx_z > 0:
dh_obx = (-1 / (4 * math.pi)) * ((m_z * (6 * rrx_x - 6 * rtx_x)) / (mu * sigma_ob * thick_ob * (
(rrx_x - rtx_x) ** 2 + (rrx_y - rtx_y) ** 2 + (rrx_z + rtx_z + (2 *O) / (
mu * sigma_ob * thick_ob)) ** 2) ** (5 / 2)) - (6 * m_x * (rrx_z + rtx_z + (2 *O) / (
mu * sigma_ob * thick_ob))) / (mu * sigma_ob * thick_ob * ((rrx_x - rtx_x) ** 2 + (
rrx_y - rtx_y) ** 2 + (rrx_z + rtx_z + (2 *O)/ (
mu * sigma_ob * thick_ob)) ** 2) ** (5 / 2)) + (5 * (6 * rrx_x - 6 * rtx_x) * (
rrx_z + rtx_z + (2 *O) / (mu * sigma_ob * thick_ob)) * (
m_x * (rrx_x - rtx_x) - m_z * (rrx_z + rtx_z +(2 *O) / (
mu * sigma_ob * thick_ob)) + m_y * (rrx_y - rtx_y))) / (
mu * sigma_ob * thick_ob * ((rrx_x - rtx_x) ** 2 + (
rrx_y - rtx_y) ** 2 + (rrx_z + rtx_z + (2 *O) / (
mu * sigma_ob * thick_ob)) ** 2) ** (7 / 2)))
else:
#(-1 / (4 * math.pi))
dh_obx1 = ((m_z * (6 * rrx_x - 6 * rtx_x)) / (
mu * sigma_ob * thick_ob * ((
rrx_x - rtx_x) ** 2 + (rrx_y - rtx_y) ** 2 + (rtx_z - rrx_z + (2 * O) / (
mu * sigma_ob * thick_ob)) ** 2) ** (5 / 2)) - (6 * m_x * (rtx_z - rrx_z + (2 * O) / (
mu * sigma_ob * thick_ob))) / (mu * sigma_ob * thick_ob * ((rrx_x - rtx_x) ** 2 + (
rrx_y - rtx_y) ** 2 + (rtx_z - rrx_z + (2 * O) / (mu * sigma_ob * thick_ob)) ** 2) ** (5 / 2)))
dh_obx2 = (5 * (6 * rrx_x - 6 * rtx_x) * (
rtx_z - rrx_z + (2 * O) / (mu * sigma_ob * thick_ob)) * (
m_x * (rrx_x - rtx_x) + m_y * (rrx_y - rtx_y) - m_z * (
rtx_z - rrx_z + (2 * O) / (mu * sigma_ob * thick_ob))) / (
mu * sigma_ob * thick_ob * ((rrx_x - rtx_x) ** 2 + (
rrx_y - rtx_y) ** 2 + (rtx_z - rrx_z + (2 * O) / (
mu * sigma_ob * thick_ob)) ** 2) ** (7 / 2)))
dh_obx = (-1 / (4 * math.pi))* (dh_obx1 + dh_obx2)
if rrx_z > 0:
dh_oby = (-1 / (4 * math.pi)) * ((m_z * (6 * rrx_y - 6 * rtx_y)) / (mu * sigma_ob * thick_ob * (
(rrx_x - rtx_x) ** 2 + (rrx_y - rtx_y) ** 2 + (rrx_z + rtx_z + (2 *O) / (
mu * sigma_ob * thick_ob)) ** 2) ** (5 / 2)) - (6 * m_y * (rrx_z + rtx_z + (2 *O) / (
mu * sigma_ob * thick_ob))) / (mu * sigma_ob * thick_ob * ((rrx_x - rtx_x) ** 2 + (
rrx_y - rtx_y) ** 2 + (rrx_z + rtx_z + (2 *O)/ (
mu * sigma_ob * thick_ob)) ** 2) ** (5 / 2)) + (5 * (6 * rrx_y - 6 * rtx_y) * (
rrx_z + rtx_z + (2 *O) / (mu * sigma_ob * thick_ob)) * (
m_x * (rrx_x - rtx_x) - m_z * (rrx_z + rtx_z +(2 *O) / (
mu * sigma_ob * thick_ob)) + m_y * (rrx_y - rtx_y))) / (
mu * sigma_ob * thick_ob * ((rrx_x - rtx_x) ** 2 + (
rrx_y - rtx_y) ** 2 + (rrx_z + rtx_z + (2 *O) / (
mu * sigma_ob * thick_ob)) ** 2) ** (7 / 2)))
else:
#(-1 / (4 * math.pi))
dh_oby1 = ((m_z * (6 * rrx_y - 6 * rtx_y)) / (
mu * sigma_ob * thick_ob * ((
rrx_x - rtx_x) ** 2 + (rrx_y - rtx_y) ** 2 + (rtx_z - rrx_z + (2 * O) / (
mu * sigma_ob * thick_ob)) ** 2) ** (5 / 2)) - (6 * m_y * (rtx_z - rrx_z + (2 * O) / (
mu * sigma_ob * thick_ob))) / (mu * sigma_ob * thick_ob * ((rrx_x - rtx_x) ** 2 + (
rrx_y - rtx_y) ** 2 + (rtx_z - rrx_z + (2 * O) / (mu * sigma_ob * thick_ob)) ** 2) ** (5 / 2)))
dh_oby2 = (5 * (6 * rrx_y - 6 * rtx_y) * (
rtx_z - rrx_z + (2 * O) / (mu * sigma_ob * thick_ob)) * (
m_x * (rrx_x - rtx_x) + m_y * (rrx_y - rtx_y) - m_z * (
rtx_z - rrx_z + (2 * O) / (mu * sigma_ob * thick_ob))) / (
mu * sigma_ob * thick_ob * ((rrx_x - rtx_x) ** 2 + (
rrx_y - rtx_y) ** 2 + (rtx_z - rrx_z + (2 * O) / (
mu * sigma_ob * thick_ob)) ** 2) ** (7 / 2)))
dh_oby = (-1 / (4 * math.pi)) * (dh_oby1 + dh_oby2)
if rrx_z > 0:
dh_obz = (-1 / (4 * math.pi)) * ((6 * m_z * (rrx_z + rtx_z + (2 * O) / (mu * sigma_ob * thick_ob))) / (
mu * sigma_ob * thick_ob * ((rrx_x - rtx_x) ** 2 + (rrx_y - rtx_y) ** 2 + (
rrx_z + rtx_z + (2 * O) / (mu * sigma_ob * thick_ob)) ** 2) ** (5 / 2)) - (6 * (
m_x * (rrx_x - rtx_x) - m_z * (rrx_z + rtx_z + (2 *O) / (
mu * sigma_ob * thick_ob)) + m_y * (rrx_y - rtx_y))) / (mu * sigma_ob * thick_ob * ((
rrx_x - rtx_x) ** 2 + (rrx_y - rtx_y) ** 2 + (rrx_z + rtx_z + (2 * O) / (
mu * sigma_ob * thick_ob)) ** 2) ** (5 / 2)) + (m_z * (6 * rrx_z + 6 * rtx_z + (
12 * O) / (mu * sigma_ob * thick_ob))) / (mu * sigma_ob * thick_ob * (
(rrx_x - rtx_x) ** 2 + (rrx_y - rtx_y) ** 2 + (rrx_z + rtx_z + (2 * O)
/ (mu * sigma_ob * thick_ob)) ** 2) ** (5 / 2)) + (5 * (
6 * rrx_z + 6 * rtx_z + (12 * O) / (mu * sigma_ob * thick_ob)) * (rrx_z + rtx_z + (2 * O) / (
mu * sigma_ob * thick_ob)) * (m_x * (rrx_x - rtx_x) - m_z * (rrx_z + rtx_z + (
2 * O) / (mu * sigma_ob * thick_ob)) + m_y * (rrx_y - rtx_y))) / (
mu * sigma_ob * thick_ob * ((rrx_x - rtx_x) ** 2 + (rrx_y - rtx_y) ** 2 + (
rrx_z + rtx_z + (2 * O) / (mu * sigma_ob * thick_ob)) ** 2) ** (7 / 2)))
else:
dh_obz = (-1 / (4 * math.pi)) * ((6 * (m_x * (rrx_x - rtx_x) + m_y * (rrx_y - rtx_y) - m_z * (
rtx_z - rrx_z + (2 * O) / (mu * sigma_ob * thick_ob)))) / (mu * sigma_ob * thick_ob * (
(rrx_x - rtx_x) ** 2 + (rrx_y - rtx_y) ** 2 + (
rtx_z - rrx_z + (2 *O) / (mu * sigma_ob * thick_ob)) ** 2) ** (5 / 2)) - (m_z * (
6 * rtx_z - 6 * rrx_z + (12 * O) / (mu * sigma_ob * thick_ob))) / (
mu * sigma_ob * thick_ob * ((rrx_x - rtx_x) ** 2 + (rrx_y - rtx_y) ** 2 + (
rtx_z - rrx_z + (2 * O) / (mu * sigma_ob * thick_ob)) ** 2) ** (5 / 2)) - (
6 * m_z * (rtx_z - rrx_z + (2 * O) / (mu * sigma_ob * thick_ob))) / (
mu * sigma_ob * thick_ob * ((rrx_x - rtx_x) ** 2 + (
rrx_y - rtx_y) ** 2 + (rtx_z - rrx_z + (2 * O) / (mu * sigma_ob * thick_ob)) ** 2) ** (5 / 2)) - (
5 * (6 * rtx_z - 6 * rrx_z + (12 * O) / (mu * sigma_ob * thick_ob)) * (rtx_z - rrx_z + (
2 * O) / (mu * sigma_ob * thick_ob)) * (m_x * (rrx_x - rtx_x) + m_y * (
rrx_y - rtx_y) - m_z * (rtx_z - rrx_z + (2 *O) / (mu * sigma_ob * thick_ob)))) / (
mu * sigma_ob * thick_ob * ((rrx_x - rtx_x) ** 2 + (
rrx_y - rtx_y) ** 2 + (rtx_z - rrx_z + (2 * O) / (
mu * sigma_ob * thick_ob)) ** 2) ** (7 / 2)))
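    # note: components are returned in (x, z, y) order, matching h_ob_xyz below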
    return np.array([dh_obx, dh_obz, dh_oby])
def h_ob_xyz(mtx, dipole_m, rtx, rrx, O, mu, sigma_ob, thick_ob):
"""
Function evaluates the x,y,z component of the overburden field
see equations Eq A-3 from Desmarais and Smith, 2016. Geophysics 81(4), P. E265-E277
"""
m_x = dipole_m * mtx[0]
m_y = dipole_m * mtx[1]
m_z = dipole_m * mtx[2]
rtx_x = rtx[0]
rtx_y = rtx[1]
rtx_z = rtx[2]
rrx_x = rrx[0]
rrx_y = rrx[1]
rrx_z = rrx[2]
if rrx_z > 0:
h_obx = (-1 / (4 * math.pi)) * (
m_x / ((rrx_x - rtx_x) ** 2 + (rrx_y - rtx_y) ** 2 + (rrx_z + rtx_z + (2 * O) / (
mu * sigma_ob * thick_ob)) ** 2) ** (3 / 2) - (
3 * (2 * rrx_x - 2 * rtx_x) * (m_x * (rrx_x - rtx_x) - m_z * (
rrx_z + rtx_z + (2 * O) / (mu * sigma_ob * thick_ob)) + m_y * (rrx_y - rtx_y))) / (
2 * ((
rrx_x - rtx_x) ** 2 + (rrx_y - rtx_y) ** 2 + (rrx_z + rtx_z + (2 * O) / (
mu * sigma_ob * thick_ob)) ** 2) ** (5 / 2)))
else:
h_obx = (-1 / (4 * math.pi)) * (
m_x / ((rrx_x - rtx_x) ** 2 + (rrx_y - rtx_y) ** 2 + (rtx_z - rrx_z + (2 * O) / (
mu * sigma_ob * thick_ob)) ** 2) ** (3 / 2) - (3 * (2 * rrx_x - 2 * rtx_x) * (m_x * (
rrx_x - rtx_x) + m_y * (rrx_y - rtx_y) - m_z * (rtx_z - rrx_z + (2 * O) / (
mu * sigma_ob * thick_ob)))) / (2 * ((rrx_x - rtx_x) ** 2 + (rrx_y - rtx_y) ** 2 + (
rtx_z - rrx_z + (2 * O) / (mu * sigma_ob * thick_ob)) ** 2) ** (5 / 2)))
if rrx_z > 0:
h_oby = (-1 / (4 * math.pi)) * (
m_y / ((rrx_x - rtx_x) ** 2 + (rrx_y - rtx_y) ** 2 + (rrx_z + rtx_z + (2 * O) / (
mu * sigma_ob * thick_ob)) ** 2) ** (3 / 2) - (
3 * (2 * rrx_y - 2 * rtx_y) * (m_x * (rrx_x - rtx_x) - m_z * (
rrx_z + rtx_z + (2 * O) / (mu * sigma_ob * thick_ob)) + m_y * (rrx_y - rtx_y))) / (
2 * ((
rrx_x - rtx_x) ** 2 + (rrx_y - rtx_y) ** 2 + (rrx_z + rtx_z + (2 * O) / (
mu * sigma_ob * thick_ob)) ** 2) ** (5 / 2)))
else:
h_oby = (-1 / (4 * math.pi)) * (
m_y / ((rrx_x - rtx_x) ** 2 + (rrx_y - rtx_y) ** 2 + (rtx_z - rrx_z + (2 * O) / (
mu * sigma_ob * thick_ob)) ** 2) ** (3 / 2) - (3 * (2 * rrx_y - 2 * rtx_y) * (m_x * (
rrx_x - rtx_x) + m_y * (rrx_y - rtx_y) - m_z * (rtx_z - rrx_z + (2 * O) / (
mu * sigma_ob * thick_ob)))) / (2 * ((rrx_x - rtx_x) ** 2 + (rrx_y - rtx_y) ** 2 + (
rtx_z - rrx_z + (2 * O) / (mu * sigma_ob * thick_ob)) ** 2) ** (5 / 2)))
if rrx_z > 0:
h_obz = (-1 / (4 * math.pi)) * (- m_z / ((rrx_x - rtx_x) ** 2 + (rrx_y - rtx_y) ** 2 + (rrx_z + rtx_z + (
2 * O) / (mu * sigma_ob * thick_ob)) ** 2) ** (3 / 2) - (3 * (2 * rrx_z + 2 * rtx_z + (
4 * O) / (mu * sigma_ob * thick_ob)) * (m_x * (rrx_x - rtx_x) - m_z * (rrx_z + rtx_z + (
2 * O) / (mu * sigma_ob * thick_ob)) + m_y * (rrx_y - rtx_y))) / (2 * ((
rrx_x - rtx_x) ** 2 + (rrx_y - rtx_y) ** 2 + (rrx_z + rtx_z + (2 * O) / (
mu * sigma_ob * thick_ob)) ** 2) ** (5 / 2)))
else:
h_obz = (-1 / (4 * math.pi)) * (m_z / ((rrx_x - rtx_x) ** 2 + (rrx_y - rtx_y) ** 2 + (rtx_z - rrx_z + (
2 * O) / (mu * sigma_ob * thick_ob)) ** 2) ** (3 / 2) + (3 * (2 * rtx_z - 2 * rrx_z + (
4 * O) / (mu * sigma_ob * thick_ob)) * (m_x * (rrx_x - rtx_x) + m_y * (
rrx_y - rtx_y) - m_z * (rtx_z - rrx_z + (2 * O) / (mu * sigma_ob * thick_ob)))) / (
2 * ((rrx_x - rtx_x) ** 2 + (rrx_y - rtx_y) ** 2 + (rtx_z - rrx_z + (
2 * O) / (mu * sigma_ob * thick_ob)) ** 2) ** (5 / 2)))
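    # note: components are returned in (x, z, y) order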
    return np.array([h_obx, h_obz, h_oby])
def static(m, r):
"""
Function calculates the field of a dipole
see Eq A-5a from Desmarais and Smith, 2016. Geophysics 81(4), P. E265-E277
    m is the dipole moment vector
    r is the vector from the dipole to the field location
    multiply all components of h by mu0 to get the b field
"""
one_over_4pi = 1 / (4 * math.pi)
r2 = np.dot(r, r)
if r2 < 1.e-20:
h = 0.0
else:
a = one_over_4pi / (math.sqrt(r2) * r2)
b = np.dot(r, m) * 3 / r2
h = (b * r - m) * a
return h
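# Illustrative sketch (added; not part of the original script): evaluating
# `static` for a unit z-directed moment observed 1 m above the dipole. On the
# dipole axis the field reduces to 2*m/(4*pi*r**3), i.e. [0, 0, 2]/(4*pi).
def _demo_static():
    m = np.array([0.0, 0.0, 1.0])  # dipole moment vector
    r = np.array([0.0, 0.0, 1.0])  # vector from dipole to field location
    print(static(m, r))            # ~ [0, 0, 0.159]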
def thetafunction_step(t, O, o, mu, sigma_sp, a, T):
"""
    Function calculates the time-dependent part of a step-response of the sphere alone
see equations Eq 12-13 from Desmarais and Smith, 2016. Geophysics 81(4), P. E265-E277
"""
theta = 0
solver = 0
k = 0
while solver < 1:
k = k + 1
        temp = (1 / (1 + np.exp(-(T / 2) * ((k * math.pi) ** 2) / (mu * sigma_sp * (a ** 2))))) * (
            (6 / ((k * math.pi) ** 2)) * np.exp((o + O - t) * ((k * math.pi) ** 2) / (mu * sigma_sp * (a ** 2))))
        theta = theta + temp
        solver = np.linalg.lstsq(np.transpose(np.atleast_2d(temp)),
                                 np.transpose(np.atleast_2d(theta)), rcond=-1)[0]
return theta
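# Illustrative sketch (added): evaluating the sphere step-response factor for
# placeholder parameter values. All numbers below are assumptions chosen only
# so the series converges quickly, not values from the original survey setup.
def _demo_thetafunction_step():
    mu = 4.0e-7 * math.pi  # free-space permeability
    theta = thetafunction_step(t=1e-3, O=0.0, o=0.0, mu=mu,
                               sigma_sp=1.0, a=50.0, T=0.02)
    print(theta)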
def dh_tot_step(mtx, dipole_m, rtx, rsp, mu, sigma_ob, thick_ob, t, o, sigma_sp, a, T):
"""
Function calculates the x,y,z component of the first order induced moment at the sphere
see equations Eq 16 from Desmarais and Smith, 2016. Geophysics 81(4), P. E265-E277
"""
    import quadpy  # numerical quadrature library (Gauss-Kronrod rules on line segments)
s = 0.0
b = t
n = 1
start_points = np.linspace(s, b, n, endpoint=False)
h = (b - s) / n
end_points = start_points + h
intervals = np.array([start_points, end_points])
    ob_array = h_ob_xyz(mtx, dipole_m, rtx, rsp, -o, mu, sigma_ob, thick_ob)
    thetaz = thetafunction_step(t, 0, o, mu, sigma_sp, a, T)
    scheme = quadpy.line_segment.gauss_kronrod(2)
    val = scheme.integrate(
        lambda O: -dh_obdt_xyz(mtx, dipole_m, rtx, rsp, O, mu, sigma_ob, thick_ob) *
        thetafunction_step(t, O, o, mu, sigma_sp, a, T),
intervals
)
return np.array([val[0] + (ob_array[0] * thetaz), val[1] + (ob_array[1] * thetaz), val[2] + (ob_array[2]*thetaz)])
def h_total_step_1storder(
mtx, dipole_m, rtx, offset_tx_rx, rsp, t, mu, sigma_ob, thick_ob, sigma_sp, a, P, apply_dip, dip, strike, wave, T):
"""
Function checks if waveform is being convolved and calls previous functions to calculate sphere-overburden response
"""
    if hasattr(wave, "__len__"):
N = len(self.wave)
temp_z = 0
temp_x = 0
tempy_xyz = 0
tempy_z = 0
tempy_y = 0
temp_y = 0
H2x = 0
H2z = 0
H2y = 0
        for i in list(range(0, N - 1, 10)):
            temp_xyz = tempy_xyz + 2 * math.pi * (a ** 3) * (P / 45) * (
                dh_tot_step(mtx, dipole_m, [0, 0, rtx[2]], [-rtx[0], -rtx[1], rsp[2]], mu, sigma_ob, thick_ob, t,
                            (-P * i / (N - 2)), sigma_sp, a, T) + H2x) * ((wave[i + 1] - wave[i]) / (8.14 * 1e-6))
            tempy_xyz = temp_xyz
#temp_z = tempy_z + 2 * math.pi * (a ** 3) * (self.P / (45)) * (
#dh_tot_step(self.mtx, self.dipole_m, [0, 0, self.rtx[2]], [-self.rtx[0], -self.rtx[1], self.rsp[2]], self.mu, self.sigma_ob, self.thick_ob, t, (
#-self.P * (i) / (N - 2)), self.sigma_sp, self.a, self.T)[1] + H2z) * ((self.wave[i + 1] - self.wave[i]) / (8.14 * 1e-6))
#tempy_z = temp_z
#temp_y = tempy_y + 2 * math.pi * (self.a ** 3) * (self.P/45) *(
#dh_tot_step(self.mtx, self.dipole_m,[0, 0, self.rtx[2]], [-self.rtx[0], -self.rtx[1], self.rsp[2]],self.mu, self.sigma_ob, self.thick_ob, self.t,
#(-self.P * i / (N - 2)), self.sigma_sp, self.a, self.T)[2] + H2x) * ((self.wave[i+1] - self.wave[i])/(8.14*1e-6))
#tempy_y = temp_y
convo_x = temp_xyz[0]
convo_z = temp_xyz[1]
convo_y = temp_xyz[2]
H_z = 0
H_xyz = 0
H_y = 0
        # (the loop above convolved the tx waveform with the step response)
# store sphere moment
msp = np.array([convo_x[0], convo_y[0], convo_z[0]])
# dipping sphere model if applydip=1
if apply_dip == 1:
norm = np.array([(math.cos((90-dip)*(math.pi/180))) * (math.cos((strike-90)*(math.pi/180))),
math.sin((strike-90)*(math.pi/180)) * math.cos(((90-dip))*(math.pi/180)),
math.sin((90-dip)*(math.pi/180))])
# make the dip normal vector a unit vector
normt = math.sqrt(np.dot(norm, norm))
norm = norm/normt
mspdotnorm = np.dot(msp, norm)
# now scale the normal to have this strength and redirect the sphere
# moment to be in the dip direction
msp = mspdotnorm * norm
# calculate field using induced moment
H_tot_x = -(
np.dot([1, 0, 0], static(msp, (np.array([-offset_tx_rx[0], -offset_tx_rx[1], rtx[2] - offset_tx_rx[2]]) -
(np.array([-rtx[0], -rtx[1], rsp[2]]))))))
H_tot_z = np.dot([0, 0, 1], static(msp, (np.array([-offset_tx_rx[0], -offset_tx_rx[1], rtx[2] - offset_tx_rx[2]]) -
(np.array([-rtx[0], -rtx[1], rsp[2]])))))
H_tot_y = np.dot([0, 1, 0], static(msp, (np.array([-offset_tx_rx[0], -offset_tx_rx[1], rtx[2] - offset_tx_rx[2]]) -
(np.array([-rtx[0], -rtx[1], rsp[2]])))))
# calculate 0th order term (field of overburden alone) and convolve with waveform
for i in list(range(1, N - 1)):
            H_xyz = H_xyz + ((P / N) * h_ob_xyz(mtx, dipole_m, [0, 0, rtx[2]],
                                                [-offset_tx_rx[0], -offset_tx_rx[1],
                                                 rtx[2] - offset_tx_rx[2]],
                                                t + (P * i / (N - 1)), mu, sigma_ob, thick_ob) *
                             (wave[i + 1] - wave[i]) / (8.14 * 1e-6))
xfinal = H_tot_x + H_xyz[0]
zfinal = H_tot_z - H_xyz[1]
yfinal = H_tot_y + H_xyz[2]
final_lst = np.array([xfinal, zfinal, yfinal])
return final_lst
else:
        # impulse case: no waveform convolution needed
        temp = dh_tot_step(
            mtx, dipole_m, [0, 0, rtx[2]], [-rtx[0], -rtx[1], rsp[2]], mu, sigma_ob, thick_ob, t, 0, sigma_sp, a, T)
# store sphere moment
        msp = np.array([(2 * math.pi * (a ** 3)) * temp[0][0],
                        (2 * math.pi * (a ** 3)) * temp[2][0],
                        (2 * math.pi * (a ** 3)) * temp[1][0]])
# dipping sphere model if applydip=1
if apply_dip == 1:
norm = np.array([(math.cos((90-dip)*(math.pi/180))) * (math.cos((strike-90)*(math.pi/180))),
math.sin((strike-90)*(math.pi/180)) * math.cos(((90-dip))*(math.pi/180)),
math.sin((90-dip)*(math.pi/180))])
# make the dip normal vector a unit vector
normt = math.sqrt(np.dot(norm, norm))
norm = norm/normt
mspdotnorm = np.dot(msp, norm)
# now scale the normal to have this strength and redirect the sphere
# moment to be in the dip direction
msp = mspdotnorm * norm
# calculate field using induced moment
statics = static(msp, (np.array([-offset_tx_rx[0], -offset_tx_rx[1], rtx[2] - offset_tx_rx[2]]) -
(np.array([-rtx[0], -rtx[1], rsp[2]]))))
H_tot_x = -(np.dot([1, 0, 0], statics))
        H_tot_z = np.dot([0, 0, 1], statics)
import numpy as np
import cv2
import time
import os
import os.path
import h5py
import tensorflow as tf
import pickle
import pandas as pd
from numpy import genfromtxt
from keras.models import Sequential
from keras.layers import Conv2D, ZeroPadding2D, Activation, Input, concatenate
from keras.models import Model
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import MaxPooling2D, AveragePooling2D
from keras.layers.merge import Concatenate
from keras.layers.core import Lambda, Flatten, Dense
from keras.initializers import glorot_uniform
from keras.engine.topology import Layer
from keras import backend as K
from keras.models import load_model
K.set_image_data_format('channels_first')
np.set_printoptions(threshold=np.nan)  # NOTE: NumPy >= 1.14 requires an int here, e.g. sys.maxsize
"""
@brief test log(time=120s)
"""
import unittest
import warnings
import sys
from logging import getLogger
from contextlib import redirect_stdout
from io import StringIO
import numpy
import onnx
from scipy.sparse import coo_matrix, csr_matrix, SparseEfficiencyWarning
from scipy.special import ( # pylint: disable=E0611
expit as logistic_sigmoid, erf)
from scipy.spatial.distance import cdist
from onnx import TensorProto, __version__ as onnx_version
from onnx.helper import make_sparse_tensor, make_tensor
from onnx.defs import onnx_opset_version
from onnx.numpy_helper import from_array
from pyquickhelper.pycode import ExtTestCase
from pyquickhelper.texthelper import compare_module_version
from sklearn.utils.extmath import softmax
try:
from sklearn.utils._testing import ignore_warnings
except ImportError:
from sklearn.utils.testing import ignore_warnings
from skl2onnx.algebra.onnx_ops import ( # pylint: disable=E0611
OnnxAbs, OnnxAdd, OnnxAnd,
OnnxArgMax_11, OnnxArgMax,
OnnxArgMin_11, OnnxArgMin,
OnnxBatchNormalization,
OnnxAcos, OnnxAcosh, OnnxAsin, OnnxAsinh, OnnxAtan, OnnxAtanh,
OnnxAveragePool,
OnnxCast, OnnxCeil, OnnxClip,
OnnxCompress,
OnnxConcat, OnnxConv, OnnxConvTranspose,
OnnxConstant, OnnxConstant_9, OnnxConstant_11,
OnnxConstant_12, OnnxConstant_13,
OnnxConstantOfShape,
OnnxCos, OnnxCosh,
OnnxCumSum,
OnnxDequantizeLinear,
OnnxDet, OnnxDiv,
OnnxDropout, OnnxDropout_7,
OnnxEinsum, OnnxEqual, OnnxErf, OnnxExp, OnnxEyeLike,
OnnxFlatten, OnnxFloor,
OnnxGreater, OnnxGreaterOrEqual, OnnxGemm, OnnxGlobalAveragePool,
OnnxIdentity, OnnxIsNaN,
OnnxLess, OnnxLessOrEqual,
OnnxLog, OnnxLpNormalization,
OnnxMatMul, OnnxMax, OnnxMaxPool, OnnxMean, OnnxMin, OnnxMod, OnnxMul,
OnnxNeg, OnnxNot,
OnnxOr,
OnnxPad, OnnxPow,
OnnxQLinearConv, OnnxQuantizeLinear,
OnnxRange,
OnnxReciprocal,
OnnxReduceL1, OnnxReduceL2,
OnnxReduceLogSumExp, OnnxReduceMax, OnnxReduceMean, OnnxReduceMin,
OnnxReduceProd,
OnnxReduceSum, OnnxReduceSumApi11, OnnxReduceSum_11, OnnxReduceSum_1,
OnnxReduceSumSquare,
OnnxRelu, OnnxReshape,
OnnxRound,
OnnxScatterElements, OnnxShape, OnnxSlice, OnnxSigmoid, OnnxSign,
OnnxSin, OnnxSinh,
OnnxSize, OnnxSoftmax,
OnnxSplit, OnnxSplitApi11,
OnnxSqrt, OnnxSub, OnnxSum,
OnnxSqueeze, OnnxSqueezeApi11,
OnnxTan, OnnxTanh, OnnxTopK, OnnxTranspose,
OnnxUnsqueeze, OnnxUnsqueezeApi11
)
try:
from skl2onnx.algebra.onnx_ops import OnnxCelu
except ImportError:
OnnxCelu = None
try:
from skl2onnx.algebra.onnx_ops import OnnxBatchNormalization_14
except ImportError:
OnnxBatchNormalization_14 = None
from skl2onnx import __version__ as skl2onnx_version, __max_supported_opset__
from mlprodict.onnxrt import OnnxInference
from mlprodict.tools.asv_options_helper import (
get_opset_number_from_onnx, get_ir_version_from_onnx)
from mlprodict.onnxrt.validate.validate_python import validate_python_inference
from mlprodict.onnxrt.ops_cpu.op_batch_normalization import (
_batchnorm_test_mode, _batchnorm_training_mode)
from mlprodict.onnxrt.ops_cpu.op_average_pool import (
_get_output_shape, _pool, _get_pad_shape)
from mlprodict.onnxrt.ops_cpu.op_global_average_pool import _global_average_pool
from mlprodict.onnxrt.ops_cpu._op_onnx_numpy import ( # pylint: disable=E0611,E0401
topk_element_min_double, topk_element_max_double,
topk_element_fetch_double,
topk_element_min_float, topk_element_max_float, topk_element_fetch_float,
topk_element_min_int64, topk_element_max_int64, topk_element_fetch_int64)
from mlprodict.onnxrt.ops_cpu.op_celu import _vcelu1, pycelu
from mlprodict.onnxrt.ops_cpu.op_topk import topk_sorted_implementation
from mlprodict.onnxrt.ops_cpu.op_pad import _pad_impl
from mlprodict.onnxrt.ops_cpu.op_max_pool import (
_pool_get_output_shape, _pool_impl)
from mlprodict.onnxrt.ops_cpu.op_dropout import _dropout
from mlprodict.onnxrt.ops_cpu._op_helper import proto2dtype
from mlprodict.onnx_tools.onnx2py_helper import (
guess_proto_dtype, _elem_type_as_str)
from mlprodict.tools.data_types import (
FloatTensorType, Int64TensorType, DoubleTensorType, StringTensorType,
Int32TensorType, BooleanTensorType, UInt8TensorType,
Int16TensorType, Int8TensorType, UInt16TensorType,
UInt32TensorType, UInt64TensorType, Float16TensorType)
from mlprodict.testing.test_utils.quantized_tensor import (
QuantizedTensor, QuantizedBiasTensor, test_qlinear_conv)
from mlprodict.onnxrt.ops_cpu.op_qlinear_conv_ import ( # pylint: disable=W0611,E0611,E0401
test_qgemm0, test_qgemm1)
from mlprodict.onnxrt.ops_cpu.op_constant import Constant_12, Constant_11, Constant_9
try:
numpy_str = numpy.str_
except ImportError:
numpy_str = str
try:
numpy_bool = numpy.bool_
except ImportError:
numpy_bool = bool
sparse_support = []
sparse_no_numpy = []
python_tested = []
def make_coo_matrix(*args, **kwargs):
coo = coo_matrix(*args, **kwargs)
coo.row = coo.row.astype(numpy.int64)
coo.col = coo.col.astype(numpy.int64)
return coo
def wraplog():
# from datetime import datetime
def wrapper(fct):
def call_f(self):
# no = datetime.now()
# print('BEGIN %s' % fct.__name__)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always", DeprecationWarning)
fct(self)
# print('DONE %s - %r' % (fct.__name__, datetime.now() - no))
return call_f
return wrapper
class TestOnnxrtPythonRuntime(ExtTestCase): # pylint: disable=R0904
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
if __name__ == "__main__":
import pprint
print('-----------')
pprint.pprint(sparse_support)
print('-----------')
pprint.pprint(sparse_no_numpy)
print('-----------')
pprint.pprint(
list(sorted({_.__name__ for _ in python_tested})))
print('-----------')
def setUp(self):
logger = getLogger('skl2onnx')
logger.disabled = True
def test_opset_skl2onnx(self):
opset_mlprodict = get_opset_number_from_onnx()
opset_skl2onnx = __max_supported_opset__
self.assertGreater(opset_skl2onnx, opset_mlprodict)
def common_expected_shapes_types(self, oinf, inputs, got, onnx_cl, model_def,
raise_shape=False):
expected_types = oinf.infer_types()
self.assertEqual(set(got) & set(expected_types), set(got))
for k, v in got.items():
if expected_types[k] in (str, numpy.str_):
# Type mismatch: dtype('<U32') != <class 'str'>
continue
if v.dtype != expected_types[k]:
raise AssertionError(
"Type mismatch: %r != %r\nexpected_types=%r\ngot=%r"
"\n----\n%r" % (
v.dtype, expected_types[k], expected_types, got,
model_def))
try:
expected_shapes = oinf.infer_shapes()
self.assertEqual(set(got) & set(expected_shapes), set(got))
except RuntimeError as e:
if raise_shape:
raise e
warnings.warn("infer_shapes fails for operator %r." % onnx_cl)
res = oinf.infer_sizes(inputs)
self.assertIsInstance(res, dict)
@ignore_warnings(category=(RuntimeWarning, DeprecationWarning,
SparseEfficiencyWarning, PendingDeprecationWarning))
def common_test_onnxt_runtime_unary(self, onnx_cl, np_fct,
op_version=None,
outputs=None, debug=False,
do_sparse=True, raise_shape=False):
if op_version is None:
op_version = get_opset_number_from_onnx()
try:
onx = onnx_cl('X', output_names=['Y'], op_version=op_version)
except RuntimeError as e:
raise RuntimeError('onnx.opset={} op_version={}'.format(
get_opset_number_from_onnx(), op_version)) from e
X = numpy.array([[1, 2], [3, -4]], dtype=numpy.float64)
model_def = onx.to_onnx(
{'X': X.astype(numpy.float32)}, target_opset=op_version,
outputs=outputs)
if debug:
print(model_def)
python_tested.append(onnx_cl)
# python code
oinfpy = OnnxInference(model_def, runtime="python", inplace=True)
validate_python_inference(oinfpy, {'X': X.astype(numpy.float32)})
# no inplace
oinf = OnnxInference(model_def, inplace=False)
all_names = "\n".join(
"%s>=v%d" % (op.ops_.__class__.__name__,
op.ops_._schema.since_version) # pylint: disable=W0212
for op in oinf.sequence_)
if debug:
got = oinf.run({'X': X.astype(numpy.float32)},
verbose=1, fLOG=print)
else:
got = oinf.run({'X': X.astype(numpy.float32)})
self.assertEqual(list(sorted(got)), ['Y'])
self.common_expected_shapes_types(
oinf, {'X': X.astype(numpy.float32)}, got, onnx_cl,
model_def, raise_shape=raise_shape)
try:
self.assertEqualArray(np_fct(X), got['Y'], decimal=5)
except AssertionError as e:
raise AssertionError(
'onnx.opset={} op_version={}\n--ONNX--\n{}\n--NAMES--\n{}'.format(
get_opset_number_from_onnx(), op_version, model_def,
all_names)) from e
# inplace
oinf = OnnxInference(model_def, input_inplace=False, inplace=True)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(np_fct(X), got['Y'], decimal=5)
# inplace2
onx2 = OnnxIdentity(
onnx_cl('X', op_version=op_version),
output_names=['Y'], op_version=op_version)
model_def2 = onx2.to_onnx(
{'X': X.astype(numpy.float32)}, target_opset=op_version,
outputs=outputs)
oinf = OnnxInference(model_def2, input_inplace=False, inplace=True)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(np_fct(X), got['Y'], decimal=5)
# input inplace
expe = np_fct(X)
oinf = OnnxInference(model_def, input_inplace=True, inplace=True)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(expe, got['Y'], decimal=5)
# sparse
if do_sparse:
row = numpy.array([0, 0, 1, 3, 1])
col = numpy.array([0, 2, 1, 3, 1])
data = numpy.array([1, 1, 1, 1, 1])
X = make_coo_matrix((data, (row.astype(numpy.int64),
col.astype(numpy.int64))),
shape=(4, 4), dtype=numpy.float32)
try:
exp = np_fct(X)
except (TypeError, NotImplementedError, ValueError) as e:
# Function np_fct does not work on sparse data.
sparse_no_numpy.append((onnx_cl.__name__, op_version, e))
return
model_def_sparse = onx.to_onnx(
{'X': X.astype(numpy.float32)}, target_opset=op_version)
oinf = OnnxInference(
model_def_sparse, input_inplace=False, inplace=True)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualSparseArray(exp, got['Y'], decimal=5)
sparse_support.append(('UnOp', op_version, onnx_cl.__name__))
@ignore_warnings(category=(RuntimeWarning, DeprecationWarning,
SparseEfficiencyWarning, PendingDeprecationWarning))
def common_test_onnxt_runtime_binary(self, onnx_cl, np_fct,
dtype=numpy.float32,
op_version=None, debug=False,
raise_shape=False):
if op_version is None:
op_version = get_opset_number_from_onnx()
idi = numpy.identity(2, dtype=dtype)
onx = onnx_cl('X', idi, output_names=['Y'], op_version=op_version)
X = numpy.array([[1, 2], [3, -4]], dtype=numpy.float64)
model_def = onx.to_onnx({'X': X.astype(dtype)},
target_opset=op_version)
oinf = OnnxInference(model_def)
if debug:
got = oinf.run({'X': X.astype(dtype)}, verbose=1, fLOG=print)
else:
got = oinf.run({'X': X.astype(dtype)})
self.assertEqual(list(sorted(got)), ['Y'])
self.common_expected_shapes_types(
oinf, {'X': X.astype(dtype)}, got, onnx_cl, model_def,
raise_shape=raise_shape)
exp = np_fct(X, idi)
self.assertEqualArray(exp, got['Y'], decimal=5)
# python code
python_tested.append(onnx_cl)
oinfpy = OnnxInference(model_def, runtime="python", inplace=True)
validate_python_inference(oinfpy, {'X': X.astype(dtype)})
# sparse
idi = make_coo_matrix(numpy.identity(2)).astype(numpy.float32)
X = make_coo_matrix(numpy.array(
[[0, 2], [3, -4]], dtype=numpy.float32))
try:
exp = np_fct(X, idi)
except (TypeError, NotImplementedError, ValueError) as e:
# Function np_fct does not work on sparse data.
sparse_no_numpy.append((onnx_cl.__name__, op_version, e))
return
onx = onnx_cl('X', idi, output_names=['Y'], op_version=op_version)
model_def_sparse = onx.to_onnx({'X': X}, target_opset=op_version)
try:
oinf = OnnxInference(
model_def_sparse, input_inplace=False, inplace=True)
except RuntimeError as e:
raise RuntimeError(
"Unable to load sparse model\n{}".format(
model_def_sparse)) from e
if debug:
got = oinf.run({'X': X}, verbose=1, fLOG=print)
else:
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
if isinstance(exp, (coo_matrix, csr_matrix)):
self.assertEqualSparseArray(exp, got['Y'], decimal=5)
elif isinstance(exp, numpy.ndarray):
self.assertEqualArray(exp, got['Y'], decimal=5)
else:
self.assertEqual(exp, got['Y'])
sparse_support.append(('BinOp', op_version, onnx_cl.__name__))
@wraplog()
def test_onnxt_runtime_abs(self):
self.common_test_onnxt_runtime_unary(OnnxAbs, numpy.abs)
@wraplog()
def test_onnxt_runtime_abs_debug(self):
f = StringIO()
with redirect_stdout(f):
self.common_test_onnxt_runtime_unary(
OnnxAbs, numpy.abs, debug=True)
@wraplog()
def test_onnxt_runtime_acos(self):
self.common_test_onnxt_runtime_unary(OnnxAcos, numpy.arccos)
@wraplog()
def test_onnxt_runtime_acosh(self):
self.common_test_onnxt_runtime_unary(OnnxAcosh, numpy.arccosh)
@wraplog()
def test_onnxt_runtime_add(self):
self.common_test_onnxt_runtime_binary(OnnxAdd, numpy.add)
@wraplog()
def test_onnxt_runtime_and(self):
self.common_test_onnxt_runtime_binary(
OnnxAnd, numpy.logical_and, dtype=numpy.bool_)
@wraplog()
def test_onnxt_runtime_argmax(self):
opsets = list(range(11, get_opset_number_from_onnx() + 1))
opsets = ['11only'] + opsets
for opset in opsets:
with self.subTest(opset=opset):
X = numpy.array([[2, 1], [0, 1]], dtype=float)
if opset == '11only':
clarg = OnnxArgMax_11
opset = 11
br = True
else:
clarg = OnnxArgMax
br = False
onx = clarg('X', output_names=['Y'], keepdims=0,
op_version=opset)
model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
target_opset=opset)
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(numpy.argmax(
X, axis=0), got['Y'], decimal=5)
self.common_expected_shapes_types(
oinf, {'X': X}, got, clarg, model_def)
if br:
continue
oinfpy = OnnxInference(
model_def, runtime="python", inplace=True)
validate_python_inference(
oinfpy, {'X': X.astype(numpy.float32)})
onx = OnnxArgMax('X', output_names=['Y'], axis=1, keepdims=0,
op_version=opset)
model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
target_opset=opset)
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(numpy.argmax(X, axis=1).ravel(),
got['Y'].ravel())
onx = OnnxArgMax('X', output_names=['Y'], axis=1, keepdims=1,
op_version=opset)
model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
target_opset=opset)
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(numpy.argmax(X, axis=1).ravel(),
got['Y'].ravel())
# sparse
X = make_coo_matrix(X, dtype=numpy.float32)
try:
exp = numpy.argmax(X, axis=1)
except (TypeError, NotImplementedError, ValueError) as e:
# Function np_fct does not work on sparse data.
sparse_no_numpy.append((OnnxArgMax.__name__, None, e))
return
model_def_sparse = onx.to_onnx({'X': X},
target_opset=opset)
oinf = OnnxInference(model_def_sparse, input_inplace=False)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(exp, got['Y'], decimal=5)
X = numpy.array([[2, 1], [0, 1]], dtype=float)
sparse_support.append(('UnOp', None, OnnxArgMax.__name__))
python_tested.append(OnnxArgMax)
@unittest.skipIf(onnx_opset_version() < 12, reason="needs onnx 1.7.0")
@wraplog()
def test_onnxt_runtime_argmax_12(self):
self.assertGreater(onnx_opset_version(), 12)
from skl2onnx.algebra.onnx_ops import OnnxArgMax_12 # pylint: disable=E0611
X = numpy.array([[2, 2, 1], [0, 1, 1]], dtype=float)
onx = OnnxArgMax_12('X', output_names=['Y'], keepdims=0, axis=1,
select_last_index=1, op_version=12)
model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(numpy.array([1, 2], dtype=numpy.int64),
got['Y'], decimal=5)
self.common_expected_shapes_types(
oinf, {'X': X}, got, OnnxArgMax_12, model_def)
@wraplog()
def test_onnxt_runtime_argmin(self):
opsets = list(range(11, get_opset_number_from_onnx() + 1))
opsets = ['11only'] + opsets
for opset in opsets:
with self.subTest(opset=opset):
if opset == '11only':
clarg = OnnxArgMin_11
opset = 11
br = True
else:
clarg = OnnxArgMin
br = False
X = numpy.array([[2, 1], [0, 1]], dtype=float)
onx = clarg('X', output_names=['Y'], keepdims=0,
op_version=opset)
model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
target_opset=opset)
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(numpy.argmin(
X, axis=0), got['Y'], decimal=5)
if br:
continue
oinfpy = OnnxInference(
model_def, runtime="python", inplace=True)
validate_python_inference(
oinfpy, {'X': X.astype(numpy.float32)})
self.common_expected_shapes_types(
oinfpy, {'X': X.astype(numpy.float32)},
got, clarg, model_def)
onx = OnnxArgMin('X', output_names=['Y'], axis=1, keepdims=0,
op_version=opset)
model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
target_opset=opset)
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(numpy.argmin(X, axis=1).ravel(),
got['Y'].ravel())
onx = OnnxArgMin('X', output_names=['Y'], axis=1, keepdims=1,
op_version=opset)
model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
target_opset=opset)
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(numpy.argmin(X, axis=1).ravel(),
got['Y'].ravel())
# sparse
X = make_coo_matrix(X, dtype=numpy.float32)
try:
exp = numpy.argmin(X, axis=1)
except (TypeError, NotImplementedError, ValueError) as e:
# Function np_fct does not work on sparse data.
sparse_no_numpy.append((OnnxArgMin.__name__, None, e))
return
model_def_sparse = onx.to_onnx({'X': X}, target_opset=opset)
oinf = OnnxInference(model_def_sparse, input_inplace=False)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(exp, got['Y'], decimal=5)
sparse_support.append(('UnOp', None, OnnxArgMin.__name__))
python_tested.append(OnnxArgMin)
@unittest.skipIf(onnx_opset_version() < 12, reason="needs onnx 1.7.0")
@wraplog()
def test_onnxt_runtime_argmin_12(self):
self.assertGreater(onnx_opset_version(), 12)
from skl2onnx.algebra.onnx_ops import OnnxArgMin_12 # pylint: disable=E0611
X = numpy.array([[2, 1, 1], [0, 0, 1]], dtype=float)
onx = OnnxArgMin_12('X', output_names=['Y'], keepdims=0, axis=1,
select_last_index=1, op_version=12)
model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(numpy.array([2, 1], dtype=numpy.int64),
got['Y'], decimal=5)
self.common_expected_shapes_types(
oinf, {'X': X}, got, OnnxArgMin_12, model_def)
@wraplog()
def test_onnxt_runtime_asin(self):
self.common_test_onnxt_runtime_unary(OnnxAsin, numpy.arcsin)
@wraplog()
def test_onnxt_runtime_asinh(self):
self.common_test_onnxt_runtime_unary(OnnxAsinh, numpy.arcsinh)
@wraplog()
def test_onnxt_runtime_atan(self):
self.common_test_onnxt_runtime_unary(OnnxAtan, numpy.arctan)
@wraplog()
def test_onnxt_runtime_atanh(self):
self.common_test_onnxt_runtime_unary(OnnxAtanh, numpy.arctanh)
@wraplog()
def test_onnxt_runtime_atan2(self):
test_pairs = [[y, x]
for x in [3., -4., 0., -1., 1.]
for y in [5., -6., 0., -1., 1.]]
y_val = numpy.array([y for y, x in test_pairs], dtype=numpy.float32)
x_val = numpy.array([x for y, x in test_pairs], dtype=numpy.float32)
def atan2(y, x):
# size: 100000
# timeit arctan: 0.00205
# timeit arctan2: 0.00361
# timeit atan2: 0.00599
sx = numpy.sign(x)
sy = numpy.sign(y)
pi_part = (sy + sx * (sy ** 2 - 1)) * (sx - 1) * (-numpy.pi / 2)
atan_part = numpy.arctan(y / (x + (1 - sx ** 2))) * sx ** 2
return atan_part + pi_part
self.assertEqualArray(
numpy.arctan2(y_val, x_val), atan2(y_val, x_val), decimal=5)
def _expect_average_pool(self, node, inputs, outputs, opset=None):
if opset is None:
opset = get_opset_number_from_onnx()
ginputs = [
onnx.helper.make_tensor_value_info(
node.input[0], TensorProto.FLOAT, []), # pylint: disable=E1101,
]
goutputs = [
onnx.helper.make_tensor_value_info(
node.output[0], TensorProto.FLOAT, []), # pylint: disable=E1101,
]
model_def = onnx.helper.make_model(
opset_imports=[onnx.helper.make_operatorsetid('', opset)],
graph=onnx.helper.make_graph(
name='test_average_pool', inputs=ginputs, outputs=goutputs,
nodes=[node]))
oinf = OnnxInference(model_def)
got = oinf.run({n: v for n, v in zip(node.input, inputs)})
self.assertEqual(len(got), 1)
self.assertEqualArray(outputs[0], got['y'])
@wraplog()
def test_onnxt_runtime_average_pool(self):
node = onnx.helper.make_node(
'AveragePool', inputs=['x'], outputs=['y'],
kernel_shape=[2, 2], auto_pad='SAME_UPPER')
x = numpy.random.randn(1, 3, 32, 32).astype(numpy.float32)
        x_shape = numpy.shape(x)
from __future__ import division
import numpy as np
from cwc.data_wrappers import reject
from cwc.evaluation.confidence_intervals import ConfidenceIntervals
from sklearn.cross_validation import StratifiedKFold
from sklearn.preprocessing import minmax_scale
from sklearn import svm
from sklearn.datasets import fetch_mldata
from sklearn import datasets
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.preprocessing import label_binarize
def train_reject_model(x, r):
"""Train a classifier of training points
Returns a classifier that predicts high probability values for training
points and low probability values for reject points.
"""
model_rej = svm.SVC(C=1.0, gamma=0.016, kernel='rbf',
probability=True)
xr = np.vstack((x, r))
yr = np.hstack((np.ones(np.alen(x)), np.zeros(np.alen(r)))).T
model_rej.fit(xr, yr.astype(int))
return model_rej
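# Illustrative sketch (added for clarity): training the reject model on small
# synthetic data; the shapes and value ranges below are placeholders.
def _demo_train_reject_model():
    x = np.random.randn(100, 2)             # "training" points
    r = np.random.uniform(-3, 3, (100, 2))  # "reject" background points
    model_rej = train_reject_model(x, r)
    print(model_rej.predict_proba(x[:5]))   # high P(class 1) expected for x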
def train_classifier_model(x, y):
model_clas = svm.SVC(C=10.0, gamma=0.002, kernel='rbf',
probability=True)
model_clas = model_clas.fit(x, y)
return model_clas
def calculate_mo_accuracy(model_clas_k, model_clas_u, model_rej_k,
model_rej_u, yk_test, yu_test):
yku_test = np.hstack((yk_test, yu_test)).reshape(-1, 1)
pred_clas_k = np.argmax(model_clas_k, axis=1)
pred_clas_u = np.argmax(model_clas_u, axis=1)
pred_clas = np.hstack((pred_clas_k, pred_clas_u)).reshape(-1,1)
y_rej = np.hstack((np.ones(np.alen(yk_test)), np.zeros(np.alen(yu_test)))).reshape(-1,1)
pred_rej_k = np.argmax(model_rej_k, axis=1)
pred_rej_u = np.argmax(model_rej_u, axis=1)
pred_rej = np.hstack((pred_rej_k, pred_rej_u)).reshape(-1,1)
multi_y = np.hstack((yku_test, y_rej))
multi_pred = np.hstack((pred_clas, pred_rej))
multi_pred_baseline = np.hstack((pred_clas, np.ones((np.alen(pred_rej),1))))
    print('Accuracy f = {}'.format(np.mean(yk_test == pred_clas_k)))
##############################################
# SYNCMD Creation
##############################################
#
# Processing Steps:
# make_specgrid
# make_sedgrid
#
# Notes
# 1)Import assumptions (filters, avg DM) are set in DATAMODEL ITEMS block
# 2) Code defaults to overwriting output files
#
# Example
# import run_syncmd
# run_syncmd.make_specgrid(specfile='syncmd_spec_hi.grid.hd5')
# run_syncmd.make_sedgrid(sedfile='syncmd_sedsobs.fits',
# specfile='syncmd_spec_hi.grid.hd5')
#
##############################################
import numpy as np
import scipy
import scipy.stats
import scipy.special  # scipy.special.erf is used in make_sedgrid below
import os
import tables
from astropy.table import Table as apyTable
from beast.external.eztables import Table
from astropy import units
from beast.physicsmodel import creategrid
from beast.physicsmodel.grid import SpectralGrid
from beast.physicsmodel.stars import stellib
from beast.physicsmodel.dust import extinction
from beast.observationmodel import phot
from beast.observationmodel.vega import Vega
import beast.observationmodel.noisemodel.generic_noisemodel as noisemodel
# DATAMODEL ITEMS
filters = ['HST_WFC3_F225W','HST_WFC3_F275W','HST_WFC3_F336W',
'HST_ACS_WFC_F475W','HST_ACS_WFC_F550M','HST_ACS_WFC_F658N',
'HST_ACS_WFC_F814W','HST_WFC3_F110W','HST_WFC3_F160W']
additional_filters = ['GALEX_FUV', 'GALEX_NUV']
add_spectral_properties_kwargs = dict(filternames=filters + additional_filters)
def make_specgrid(specfile='syncmd_spec.grid.hd5',
fakein='syncmd_final-loz_parsec.fits',
distanceModulus=18.96, zsol=0.0142,
trimspec=False, grngspec=[1.15e3,3.0e4],
use_btsettl=False, btsettl_medres=False):
"""
Create spectral grid from FAKE output
Parameters
----------
specfile: str
file into which save the spectral grid; format = .grid.hd5
fakein: str
output file from FAKE used as input
"""
idistanceModulus = distanceModulus * units.mag
dmod = idistanceModulus.to(units.mag).value
distance = 10 ** ( (dmod / 5.) + 1 ) * units.pc
if use_btsettl:
osl = stellib.BTSettl(medres=btsettl_medres)
else:
osl = stellib.Tlusty() + stellib.Kurucz()
synraw = apyTable.read(fakein)
synin = Table()
synin.addCol('logg', synraw['MLOGG']*1.0)
synin.addCol('logT', synraw['MLOGT']*1.0)
synin.addCol('logL', (-0.4)*(synraw['MMBOL']-distanceModulus-4.77))
synin.addCol('Z', 10.**(synraw['MHZ'])*zsol)
synin.addCol('logA', np.log10(synraw['AGE'])+9.0)
synin.addCol('M_ini', synraw['MMASS']*1.0)
spgrid = osl.gen_spectral_grid_from_given_points(synin)
_distance = distance.to(units.pc).value
nameformat = add_spectral_properties_kwargs.pop('nameformat', '{0:s}') + '_nd'
spgrid.seds = spgrid.seds / (0.1 * _distance) ** 2 # Convert from 10 pc
spgrid = creategrid.add_spectral_properties(spgrid, nameformat=nameformat,
**add_spectral_properties_kwargs)
# Trim spec for good extLaw range
if trimspec:
sel = ((spgrid.lamb > grngspec[0]) & (spgrid.lamb < grngspec[1]))
spgrid.lamb=spgrid.lamb[sel]
spgrid.seds=spgrid.seds[:,sel]
# Write out file, remove if it exists
try:
os.remove(specfile)
except OSError:
pass
spgrid.writeHDF(specfile)
def make_sedgrid(sedfile='syncmd_sedsobs.fits', sedfilegrid=None,
specfile='syncmd_spec.grid.hd5',
astfile='ast_half1+3_wbg.fits',
av_fg=0.18, av_red_median=0.4, av_red_loc=0.0, av_red_sig=0.55,
av_unred_max=0.0, dmod_sig_old=0.15, dust_dmod_relative=-0.1,
sclh_ratio_max=10., sclh_ratio_min=1.,sclh_loga_transition=8.5,
useF99dust=False,
output_raw_cols=False, output_allraw_cols=False,
distanceModulus=18.96):
"""
Create SED grid from spectral grid, applying dust attenuation and
distance shifts. Write output SEDs into a FITS file.
Model includes age-dependent extinction, implemented as a simple two
component model (young stars, old stars; divided at age defined by
sclh_loga_transition) where variables are linked:
1) dmod_sig_old sets maximum DM, 2) dmod_sig_dust set by dmod_sig_old &
sclh_ratio_max, 3) dmod_sig_yng set by dmod_sig_dust & sclh_ratio_min
Parameters
----------
sedfile: str
output file for observed SEDs; format = .fits
sedfilegrid: str
output file for observed SEDs; format = .grid.hd5;
default=None; no grid file written unless param is passed
specfile: str
input file from make_specgrid; format = .grid.hd5
astfile: str
input file for ASTs; format = .fits
av_fg: float
        foreground (MW) Av in magnitudes; default = 0.18 mag
av_red_median: float
median of lognormal dist. for Av in magnitudes; where
        av_red_mean = av_red_median * exp(av_red_sig**2./2.0); default = 0.4 mag
    av_red_loc: float
zeropoint of lognormal dist.; default = 0.0 mag
av_red_sig: float
        sigma of lognormal dist. for Av in magnitudes; default = 0.55 mag
av_unred_max: float
        maximum Av for uniform unreddened dist. in magnitudes; default = 0.0 mag
useF99dust: boolean
use F99 dust extinction curve instead of G03 SMC Avg; default = False
dmod_sig_old: float
sigma of normal dist. (centered at 0.) of distance modulus offsets,
where offsets are relative to mean set in preamble; default=0.15 mag
dust_dmod_relative: float
offset of dust from average distance, given in mag w.r.t. average
        distance modulus; default=-0.1 mag
sclh_ratio_max: float
for step-function scale height model, this is large value adopted at
old ages when dust is in thin plane with respect to dust; default = 10.
sclh_ratio_min: float
for step function scale height model, this is small value adopted at
young ages when stars and dust are well-mixed; default = 1.
sclh_loga_transition: float
log(age/yr) of step-function transition point for scale height
difference; default = 8.5
output_raw_cols: boolean
flag to add RAW and ORIG columns to output file
output_allraw_cols: boolean
flag to add RAW_AV and RAW_DM columns to output file
"""
# Load spec grid
spgrid = SpectralGrid(specfile, backend='memory')
N = len(spgrid.grid)
# Compute Vega Fluxes
_, vega_flux, _ = Vega().getFlux(filters)
# Compute Orig Fluxes + Mags (w/o Av + Dmod Shifts)
av0_results = spgrid.getSEDs(filters)
mag_av0 = ((-2.5)*np.log10(av0_results.seds[:]/vega_flux))
### Set Distance Modulus Distribution
# Calc Constants
dmod_sig_dust = dmod_sig_old / sclh_ratio_max
dmod_sig_yng = dmod_sig_dust * sclh_ratio_min
# Current: Normal w/ sigma=dmod_sig
    dmod_offset_raw = np.random.normal(0., 1.0, N)
# Add logic for assigning scalings -- current: step function
idmod_sig = np.zeros(N)
idmod_sig[spgrid['logA'] < sclh_loga_transition] = dmod_sig_yng
idmod_sig[spgrid['logA'] >= sclh_loga_transition] = dmod_sig_old
idmod_off = np.zeros(N)
idmod_off[spgrid['logA'] < sclh_loga_transition] = dust_dmod_relative
#idmod_off[spgrid['logA'] < sclh_loga_transition] = 0.0
dmod_offset = (dmod_offset_raw * idmod_sig) + idmod_off
# Set Av Distribution
# Current: Lognormal w/ median=av_red_median, sigma=av_red_sig
# -Dust Pos = dust_dmod_relative, sets f_red
# -Foreground Pop = Uniform from Av=0-av_unred_max
# -MW Foreground = av_fg added to all sources
av_draw = scipy.stats.lognorm.rvs(av_red_sig,loc=av_red_loc,
scale=av_red_median,size=N)
#av[np.where(av < 0.0)] = 0.0 #Clip negative Av tail
# Assign Av via Z distribution
z_erf = (dmod_offset-dust_dmod_relative)/dmod_sig_dust
av = av_draw * 0.5*(1.+scipy.special.erf(z_erf/np.sqrt(2.)))
f_red = -99.99
# Foreground Pop
#fgpop, = np.where(dmod_offset < dust_dmod_relative)
#n_fgpop = len(fgpop)
#av[fgpop] = scipy.random.uniform(0.0,av_unred_max,n_fgpop)
#f_red = 1.-(n_fgpop/float(N))
# Add Foreground Reddening
av_tot = av + av_fg
print('f_red = {:5.3f}'.format(f_red))
###########################################
# Redden Spectra
if useF99dust:
extLaw = extinction.Fitzpatrick99()
#extLaw = extinction.Cardelli89()
else:
extLaw = extinction.Gordon03_SMCBar()
extLaw_Av1 = extLaw.function(spgrid.lamb[:], 1.0)
spgrid.seds *= np.exp(-1. * (av[:,np.newaxis] * extLaw_Av1))
extLawMW = extinction.Fitzpatrick99()
extLawMW_Av1 = extLawMW.function(spgrid.lamb[:], 1.0)
spgrid.seds *= np.exp(-1. * (av_fg * extLawMW_Av1))
sed_results = spgrid.getSEDs(filters)
flux_avonly = sed_results.seds[:].copy()
mag_raw_av = ((-2.5)*np.log10(flux_avonly/vega_flux))
# Add Distance Offset
spgrid.seds = spgrid.seds * 10.**(-0.4*dmod_offset[:,np.newaxis])
mag_raw_dm = mag_av0.copy() + dmod_offset[:,np.newaxis]
# Compute SEDs
cols = {'Av': np.empty(N, dtype=float), 'Dmod_offset': np.empty(N, dtype=float)}
#'Rv': np.empty(N, dtype=float),
keys = spgrid.keys()
for key in keys:
cols[key] = np.empty(N, dtype=float)
cols['Av'] = av_tot
#cols['Rv'] = Rv
#cols['f_A'] = f_A
#cols['Rv_A'] = Rv_MW
cols['Dmod_offset'] = dmod_offset
# Compute reddened fluxes in grid columns as original, but no DMod shift
nameformat = add_spectral_properties_kwargs.pop('nameformat','{0:s}') + '_wd'
spgrid = creategrid.add_spectral_properties(spgrid, nameformat=nameformat,
**add_spectral_properties_kwargs)
sed_results = spgrid.getSEDs(filters)
_lamb = sed_results.lamb[:]
_seds = ((-2.5)*np.log10(sed_results.seds[:]/vega_flux))
for key in sed_results.grid.keys():
if key not in keys:
            cols[key] = np.empty(N, dtype=float)
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Copyright 2021- QuOCS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import numpy as np
from quocslib.tools.randomgenerator import RandomNumberGenerator
def ptrace(rho, dimensions):
"""
    Placeholder for a partial trace implementation (e.g. via numpy.einsum)
    TODO: implement; currently returns rho unchanged
"""
return rho
def commutator(A, B):
return A @ B - B @ A
def gram_schmidt(A):
"""
Orthonormalize a set of linear independent vectors
:param A: Square matrix with linear independent vectors
:return A: Square matrix with orthonormalize vectors
"""
# Get the number of vectors.
n = A.shape[1]
for j in range(n):
# To orthogonalize the vector in column j with respect to the
# previous vectors, subtract from it its projection onto
# each of the previous vectors.
for k in range(j):
u_k = A[:, k]
A[:, j] -= np.dot(u_k, A[:, j]) * u_k / np.linalg.norm(u_k) ** 2
A[:, j] = A[:, j] / np.linalg.norm(A[:, j])
return A
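# Illustrative check (added): gram_schmidt should return orthonormal columns,
# so Q.T @ Q is (numerically) the identity matrix.
def _demo_gram_schmidt():
    A = np.array([[1.0, 1.0],
                  [0.0, 1.0]])
    Q = gram_schmidt(A.copy())  # copy: gram_schmidt modifies A in place
    print(Q.T @ Q)              # ~ [[1, 0], [0, 1]]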
def simplex_creation(
mean_value: np.array, sigma_variation: np.array, rng: RandomNumberGenerator = None
) -> np.array:
"""
Creation of the simplex
@return:
"""
ctrl_par_number = mean_value.shape[0]
##################
# Scale matrix:
# Explain what the scale matrix means here
##################
# First row
x0_scale = np.zeros((1, ctrl_par_number))
# Simplex matrix ( without first row )
simplex_matrix = np.diag(np.ones_like(sigma_variation))
# Add random number in the first column
if rng is None:
random_array = np.random.rand(ctrl_par_number)
else:
random_array = rng.get_random_numbers(ctrl_par_number)
random_array = random_array.reshape(
ctrl_par_number,
)
simplex_matrix[0, :] += np.sqrt(3) * (random_array - 0.5) * 2
# Orthogonalize set of vectors with gram_schmidt, and rescale with the normalization length
simplex_matrix_orthonormal = gram_schmidt(simplex_matrix.T)
# Rescale the vector with the sigma variation
simplex_matrix_orthogonal_rescaled = simplex_matrix_orthonormal @ np.diag(
sigma_variation
)
# Add the first row containing only zeros
x_t_norm = np.append(x0_scale, simplex_matrix_orthogonal_rescaled, axis=0)
# Offset matrix
x_offset = np.outer(np.ones((1, ctrl_par_number + 1)), mean_value)
# Start simplex matrix
StartSimplex = x_t_norm + x_offset
return StartSimplex
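# Illustrative usage (added): building a start simplex around a mean parameter
# vector; the mean and sigma values below are arbitrary placeholders.
def _demo_simplex_creation():
    mean_value = np.array([0.5, 1.0, -0.2])
    sigma_variation = np.array([0.1, 0.2, 0.05])
    simplex = simplex_creation(mean_value, sigma_variation)
    print(simplex.shape)  # (4, 3): Nc + 1 vertices for Nc control parameters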
if __name__ == "__main__":
# TODO Move this main script to a test script
Nc = 4
ampl_var_1 = 2.0
ampl_var_2 = 0.7
f_norm = 1 / np.sqrt(2)
p_1 = (ampl_var_1 * f_norm) * np.ones(
2,
)
p_2 = (ampl_var_2 * f_norm) * np.ones(
2,
)
sc_vec = np.append(p_1, p_2)
x0_scale = np.zeros((1, Nc))
# Simplex matrix ( without first row )
simplex_m = np.diag(sc_vec)
# Add random number
simplex_m[0, :] += (
(sc_vec[0] / 10.0)
* (
np.random.rand(
Nc,
)
- 0.5
)
* 2
)
    simplex_m_r = gram_schmidt(simplex_m.T).T  # gram_schmidt takes a single matrix argument
# Rescale accordingly to amplitude variation
# x_norm = A_norm.dot(np.diag(sc_vec))
# Add first row
    x_t_norm = np.append(x0_scale, simplex_m_r, axis=0)
import numpy as np
class data_parser:
def __init__(self, data_file_name, verbose=1):
data_file = open(data_file_name,"r+")
data = data_file.readlines()
data_file.close()
if verbose == 1:
print('Total number of data lines read: ' + str(len(data)))
print('First few lines: ')
print(data[0:10])
# break data up
self.break_into_seperate_signals(data)
self.clean_control_data()
self.clean_wheel_data()
self.clean_range_data()
self.correspond_move_and_sense()
def break_into_seperate_signals(self, data):
data_clean = [line.split('\n')[0] for line in data if len(line.split('\n')[0]) > 0]
self._data_control = [line for line in data_clean if 'Control' in line]
self._data_wheel = [line for line in data_clean if 'LDir' in line]
self._data_sense = [line for line in data_clean if 'S1' in line]
def clean_control_data(self):
control_time = [int(line.split(' : ')[0]) for line in self._data_control]
control_value = [line.split(' : Control ')[-1] for line in self._data_control]
# controls come in way more often than movement or sensing data
# take away duplicates
index = 1
while index < len(control_time):
if control_time[index] == control_time[index-1]:
control_time.pop(index)
control_value.pop(index)
else:
index += 1
self._controls = {
'time': np.array(control_time),
'values': np.array(control_value)
}
def clean_wheel_data(self):
wheel_time = [int(line.split(' : ')[0]) for line in self._data_wheel]
wheel_lcnt = [int(line.split(' | ')[1].split(': ')[-1]) for line in self._data_wheel]
wheel_rcnt = [int(line.split(' | ')[-1].split(': ')[-1]) for line in self._data_wheel]
# remove sensings where neither wheel turned
# index = 0
# while index < len(wheel_time):
# if wheel_lcnt[index] == 0 and wheel_rcnt[index] == 0:
# wheel_lcnt.pop(index)
# wheel_rcnt.pop(index)
# wheel_time.pop(index)
# else:
# index += 1
self._wheels = {
'time': np.array(wheel_time),
'l_cnt': np.array(wheel_lcnt),
'r_cnt': np.array(wheel_rcnt)*-1 # flip the sign of the right wheel because it makes sense
}
def clean_range_data(self):
range_time = [int(line.split(' : ')[0]) for line in self._data_sense]
range_front = [float(line.split(' | ')[0].split(': ')[-1].split(' cm')[0]) for line in self._data_sense]
range_left = [float(line.split(' | ')[1].split(': ')[-1].split(' cm')[0]) for line in self._data_sense]
range_back = [float(line.split(' | ')[2].split(': ')[-1].split(' cm')[0]) for line in self._data_sense]
range_right = [float(line.split(' | ')[3].split(': ')[-1].split(' cm')[0]) for line in self._data_sense]
# turn 0 valued ranges into np.nan values
for i in range(len(range_front)):
if range_front[i] == 0.0:
range_front[i] = np.nan
if range_left[i] == 0.0:
range_left[i] = np.nan
if range_back[i] == 0.0:
range_back[i] = np.nan
if range_right[i] == 0.0:
range_right[i] = np.nan
self._ranges = {
'time': np.array(range_time),
'front': np.array(range_front),
'left': np.array(range_left),
'back': np.array(range_back),
'right': np.array(range_right)
}
def correspond_move_and_sense(self):
sense_move_vals = {}
for key in self._ranges.keys():
sense_move_vals[key] = []
for i,time_wheel in enumerate(self._wheels['time']):
            sense_time_index = np.argmin(np.abs(self._ranges['time'] - time_wheel))
import ast
import calendar
import constants as c
import datetime
import logging
import numpy
import matplotlib.pyplot as plt
import os
import qcutils
import scipy
import sys
import pdb
logger = logging.getLogger("pfp_log")
# code to integrate Ian's code into OzFluxQC
#def apply_turbulence_filter(data_dict,indicator):
#data_dict["NEE"] = numpy.where(indicator==0,numpy.nan,data_dict["NEE"])
def get_configs_dict(cf,ds):
# configs_dict = {'nan_value': -9999,
# 'minimum_temperature_spread': 5,
# 'step_size_days': 5,
# 'window_size_days': 15,
# 'min_pct_annual': 30,
# 'min_pct_noct_window': 20,
# 'min_pct_day_window': 50,
# 'output_plots': False,
# 'measurement_interval': 0.5,
# 'QC_accept_code': 0,
# 'plot_output_path': '/home/imchugh/Documents'}
configs_dict = {}
configs_dict["nan_value"] = int(c.missing_value)
opt = qcutils.get_keyvaluefromcf(cf,["ER","ER_LT","ERUsingLloydTaylor"],
"minimum_temperature_spread",default=5)
configs_dict["minimum_temperature_spread"] = int(opt)
opt = qcutils.get_keyvaluefromcf(cf,["ER","ER_LT","ERUsingLloydTaylor"],
"step_size_days",default=5)
configs_dict["step_size_days"] = int(opt)
opt = qcutils.get_keyvaluefromcf(cf,["ER","ER_LT","ERUsingLloydTaylor"],
"window_size_days",default=15)
configs_dict["window_size_days"] = int(opt)
opt = qcutils.get_keyvaluefromcf(cf,["ER","ER_LT","ERUsingLloydTaylor"],
"minimum_percent_annual",default=30)
configs_dict["minimum_pct_annual"] = int(opt)
opt = qcutils.get_keyvaluefromcf(cf,["ER","ER_LT","ERUsingLloydTaylor"],
"minimum_percent_noct_window",default=20)
configs_dict["minimum_pct_noct_window"] = int(opt)
#opt = qcutils.get_keyvaluefromcf(cf,["ER","ER_LT","ERUsingLloydTaylor"],
#"minimum_percent_day_window",
#default=50)
#configs_dict["minimum_pct_day_window"] = int(opt)
opt = qcutils.get_keyvaluefromcf(cf,["ER","ER_LT","ERUsingLloydTaylor"],
"output_plots",default="False")
configs_dict["output_plots"] = (opt=="True")
opt = qcutils.get_keyvaluefromcf(cf,["ER","ER_LT","ERUsingLloydTaylor"],
"show_plots",default="False")
configs_dict["show_plots"] = (opt=="True")
opt = qcutils.get_keyvaluefromcf(cf,["ER","ER_LT","ERUsingLloydTaylor"],
"target",default="ER")
configs_dict["target"] = str(opt)
opt = qcutils.get_keyvaluefromcf(cf,["ER","ER_LT","ERUsingLloydTaylor"],
"drivers",default="['Ta']")
configs_dict["drivers"] = ast.literal_eval(opt)[0]
opt = qcutils.get_keyvaluefromcf(cf,["ER","ER_LT","ERUsingLloydTaylor"],
"output",default="ER_LT_all")
configs_dict["output_label"] = opt
configs_dict["output_results"] = True
ts = int(ds.globalattributes["time_step"])
configs_dict["measurement_interval"] = float(ts)/60.0
configs_dict["QC_accept_code"] = 0
opt = qcutils.get_keyvaluefromcf(cf,["Files"],"plot_path",default="plots/")
configs_dict["output_path"] = os.path.join(opt,"respiration/")
return configs_dict
def get_data_dict(ds,configs_dict):
data = {}
# NOTE: series are ndarrays not masked arrays
Fc,Fc_flag,a = qcutils.GetSeries(ds,"Fc")
target = configs_dict["target"]
ER,ER_flag,a = qcutils.GetSeries(ds,target)
Fsd,Fsd_flag,a = qcutils.GetSeries(ds,"Fsd")
T_label = configs_dict["drivers"]
T,T_flag,a = qcutils.GetSeries(ds,T_label)
VPD,VPD_flag,a = qcutils.GetSeries(ds,"VPD")
ustar,ustar_flag,a = qcutils.GetSeries(ds,"ustar")
# replace c.missing_value with numpy.nan
Fc = numpy.where((Fc_flag!=0)|(Fc==c.missing_value),
numpy.nan,Fc)
ustar = numpy.where((ustar_flag!=0)|(ustar==c.missing_value),
numpy.nan,ustar)
ER = numpy.where((ER_flag!=0)|(ER==c.missing_value),
numpy.nan,ER)
#Fsd = numpy.where((Fsd_flag!=0)|(Fsd==c.missing_value),
#numpy.nan,Fsd)
#T = numpy.where((T_flag!=0)|(T==c.missing_value),
#numpy.nan,T)
#VPD = numpy.where((VPD_flag!=0)|(VPD==c.missing_value),
#numpy.nan,VPD)
# put the data in the dictionary
#data["NEE"] = Fc
data["NEE"] = ER
data["PAR"] = Fsd*0.46*4.6
data["TempC"] = T
data["VPD"] = VPD
data["ustar"] = ustar
data["date_time"] = numpy.array(ds.series["DateTime"]["Data"])
return data
# code from dark_T_response_functions.py
def TRF(data_dict, Eo, rb):
return rb * numpy.exp(Eo * (1 / (10 + 46.02) - 1 / (data_dict['TempC'] + 46.02)))
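# Illustrative sketch (added): the Lloyd & Taylor (1994) form above uses a
# 10 degC reference temperature, so TRF returns rb exactly at TempC = 10.
def _demo_TRF():
    demo_data = {'TempC': numpy.array([10.0, 20.0])}
    print(TRF(demo_data, Eo=100.0, rb=2.0))  # first element == 2.0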
def optimise_rb(data_dict, params_dict):
# Initialise error state variable
error_state = 0
# Get drivers and response
drivers_dict = {driver: data_dict[driver] for driver in ['TempC']}
response_array = data_dict['NEE']
try:
params = scipy.optimize.curve_fit(lambda x, b:
TRF(x, params_dict['Eo_default'], b),
drivers_dict,
response_array,
p0 = [params_dict['rb_prior']])[0]
except RuntimeError:
params = [numpy.nan]
# If negative rb returned, set to nan
if params[0] < 0:
error_state = 9
params = [numpy.nan]
return params, error_state
# code from Partition_NEE.py
def get_dates(datetime_array, configs_dict):
# Assign configs to local vars
window = configs_dict['window_size_days']
# Create a series of continuous whole day dates that will be used for output
# (parameter series will be interpolated between window centres)
start_date = datetime_array[0].date()
end_date = datetime_array[-1].date()
num_days = (end_date - start_date).days + 1 # Add 1 so is inclusive of both end members
    all_dates_array = numpy.array([start_date + datetime.timedelta(i) for i in range(num_days)])
# Create a shifted array
shift_mins = 60 * configs_dict['measurement_interval']
shift_datetime_array = datetime_array - datetime.timedelta(minutes = shift_mins)
# Check that first and last days are complete and revise start and end dates if required
temp_date = datetime.datetime.combine((shift_datetime_array[0] + datetime.timedelta(1)).date(),
datetime.datetime.min.time())
num_obs = len(numpy.where(shift_datetime_array < temp_date)[0])
if num_obs < 24 * (1 / configs_dict['measurement_interval']):
start_date = start_date + datetime.timedelta(1)
temp_date = datetime.datetime.combine(shift_datetime_array[-1].date(),
datetime.datetime.min.time())
num_obs = len(numpy.where(shift_datetime_array >= temp_date)[0])
if num_obs < 24 * (1 / configs_dict['measurement_interval']):
end_date = end_date - datetime.timedelta(1)
# Calculate the dates that represent the centre of the window for each step
num_days = (end_date - start_date).days + 1 - window # Add 1 so is inclusive of both end members
first_fit_day = start_date + datetime.timedelta(window / 2)
step_days = numpy.arange(0, num_days, configs_dict['step_size_days'])
step_dates_array = [first_fit_day + datetime.timedelta(i) for i in step_days]
# Make an index dictionary for step dates
step_dates_index_dict = {}
for date in step_dates_array:
date_time = (datetime.datetime.combine(date, datetime.datetime.min.time())
+ datetime.timedelta(hours = 12))
start_date = date_time - datetime.timedelta(window / 2.0)
start_date = max(start_date,datetime_array[0])
end_date = date_time + datetime.timedelta(window / 2.0)
end_date = min(end_date,datetime_array[-1])
start_ind = numpy.where(datetime_array == start_date)[0].item() + 1
end_ind = numpy.where(datetime_array == end_date)[0].item()
step_dates_index_dict[date] = [start_ind, end_ind]
# Make an index dictionary for all dates
all_dates_index_dict = {}
for date in all_dates_array:
date_time = datetime.datetime.combine(date, datetime.datetime.min.time())
if date == all_dates_array[0]:
start_ind = 0
else:
start_date = date_time + datetime.timedelta(hours = configs_dict['measurement_interval'])
start_date = max(start_date,datetime_array[0])
if start_date>datetime_array[-1]: break
start_ind = numpy.where(datetime_array == start_date)[0].item()
if date >= all_dates_array[-1]:
end_ind = len(datetime_array)
else:
end_date = date_time + datetime.timedelta(1)
end_date = min(end_date,datetime_array[-1])
end_ind = numpy.where(datetime_array == end_date)[0].item()
all_dates_index_dict[date] = [start_ind, end_ind]
# Make an index dictionary for years
years_index_dict = {}
year_array = numpy.array([i.year for i in shift_datetime_array])
year_list = list(set(year_array))
for yr in year_list:
index = numpy.where(year_array == yr)[0]
years_index_dict[yr] = [index[0], index[-1]]
return step_dates_index_dict, all_dates_index_dict, years_index_dict
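# Illustrative sketch (synthetic timestamps): get_dates expects a numpy array
# of datetimes at a fixed measurement interval plus a configuration dict.
# All values below are made up for demonstration.
def _demo_get_dates():
    t0 = datetime.datetime(2014, 1, 1, 0, 30)
    dts = numpy.array([t0 + datetime.timedelta(minutes=30 * i)
                       for i in range(48 * 60)])  # 60 days of 30-minute data
    configs = {'window_size_days': 15, 'step_size_days': 5,
               'measurement_interval': 0.5}
    return get_dates(dts, configs)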
def make_initial_guess_dict(data_d):
# Calculate the parameter values that are intialised from data
index = numpy.where(data_d['PAR'] < 10)[0]
daytime_NEE_mean = numpy.nanmean(data_d['NEE'][index])
daytime_NEE_range = (numpy.nanpercentile(data_d['NEE'][index], 5) -
numpy.nanpercentile(data_d['NEE'][index], 95))
params_dict = {'Eo_prior': 100,
'k_prior': 0,
'alpha_prior': -0.01,
'rb_prior': daytime_NEE_mean,
'beta_prior': daytime_NEE_range,
'alpha_default': 0,
'beta_default': 0,
'k_default': 0 }
return params_dict
def optimise_all(data_dict, params_dict):
# Initialise error state variable
error_state = 0
drivers_dict = {driver: data_dict[driver] for driver in ['TempC']}
response_array = data_dict['NEE']
try:
params = scipy.optimize.curve_fit(lambda x, a, b:
TRF(x, a, b),
drivers_dict,
response_array,
p0 = [params_dict['Eo_prior'],
params_dict['rb_prior']])[0]
except RuntimeError:
        params = [numpy.nan, numpy.nan]
error_state = 3
return params, error_state
def optimise_annual_Eo(data_dict, params_dict, configs_dict, year_index_dict):
# Initialise local variables with configurations
min_pct = configs_dict['minimum_pct_annual']
msmt_int = configs_dict['measurement_interval']
# Get Eo for each year and compile dictionary
status = {"code":0,"message":"OK"}
yearsEo_dict = {}
yearsEo_raw_dict = {}
yearsQC_dict = {}
yearsQC_raw_dict = {}
Eo_pass_keys = []
Eo_range_fail_keys = []
Eo_nan_fail_keys = []
year_list = year_index_dict.keys()
logger.info(" E0 optimised using whole year is as follows")
for yr in year_list:
# Calculate number of recs for year
days = 366 if calendar.isleap(yr) else 365
recs = days * (24 / msmt_int) / 2
# Subset data
sub_dict = subset_window(data_dict, year_index_dict[yr])
sub_dict = subset_nan(sub_dict)
noct_flag = True
# no need to subset for day/night when input data is ER
#sub_dict = subset_daynight(sub_dict, noct_flag)
# Calculate percent of potential annual data that the subset contains
pct = round(float(len(sub_dict['NEE'])) / recs * 100)
# Fit L&T parameters if minimum data criterion satisfied, otherwise nan
if pct >= min_pct:
params, error_code = optimise_all(sub_dict, params_dict)
else:
msg = " Less than "+str(min_pct)+ "% for year "+str(yr)+" ("+str(pct)+"%)"
logger.warning(msg)
params, error_code = [numpy.nan, numpy.nan], 10
# Assign year to pass, range_fail or nan_fail list for subsequent QC and fill
Eo = params[0]
yearsEo_dict[yr] = Eo
yearsEo_raw_dict[yr] = Eo
yearsQC_dict[yr] = error_code
yearsQC_raw_dict[yr] = error_code
if numpy.isnan(Eo):
Eo_nan_fail_keys.append(yr)
elif ((Eo < 50) | (Eo > 400)):
Eo_range_fail_keys.append(yr)
else:
Eo_pass_keys.append(yr)
logger.info(" E0 for "+str(yr) + ": " + str(round(params[0], 1)))
# Do QC on Eo
if len(Eo_pass_keys) != len(yearsEo_dict):
if len(Eo_nan_fail_keys) == len(yearsEo_dict):
msg = " Could not find any values of Eo for any years! Exiting..."
status["code"] = 1
status["message"] = msg
return yearsEo_dict, yearsQC_dict, yearsEo_raw_dict, yearsQC_raw_dict, status
elif len(Eo_pass_keys) != 0:
Eo_mean = numpy.array([yearsEo_dict[i] for i in Eo_pass_keys]).mean()
all_fail_keys = Eo_range_fail_keys + Eo_nan_fail_keys
for i in (all_fail_keys):
yearsEo_dict[i] = Eo_mean
all_fail_keys = [str(key) for key in all_fail_keys]
if len(all_fail_keys) > 1:
all_fail_str = ', '.join(all_fail_keys)
else:
all_fail_str = all_fail_keys[0]
logger.warning(" Eo optimisation failed for the following years: " + all_fail_str)
logger.warning(" Eo estimated from the mean of all other years")
else:
for i in Eo_range_fail_keys:
if yearsEo_dict[i] < 50:
yearsEo_dict[i]=50
else:
yearsEo_dict[i]=400
if len(Eo_range_fail_keys)<=1:
Eo_mean = yearsEo_dict[Eo_range_fail_keys[0]]
else:
l = [yearsEo_dict[i] for i in Eo_range_fail_keys]
Eo_mean = sum(l)/float(len(l))
for i in Eo_nan_fail_keys:
yearsEo_dict[i] = Eo_mean
logger.warning(" Eo estimates were out of range for all years")
logger.warning(" Low estimates have been set to lower limit (50)")
logger.warning(" High estimates have been set to upper limit (400)")
logger.warning(" Parameter estimates are unlikely to be robust!")
else:
logger.info(" Eo estimates passed QC for all years")
return yearsEo_dict, yearsQC_dict, yearsEo_raw_dict, yearsQC_raw_dict, status
def rpLT_createdict(cf,ds,series):
"""
Purpose:
Creates a dictionary in ds to hold information about estimating ecosystem
respiration using the Lloyd-Taylor method.
Usage:
Author: PRI
Date October 2015
"""
# get the section of the control file containing the series
section = qcutils.get_cfsection(cf,series=series,mode="quiet")
# return without doing anything if the series isn't in a control file section
if len(section)==0:
logger.error("ERUsingLloydTaylor: Series "+series+" not found in control file, skipping ...")
return
# check that none of the drivers have missing data
driver_list = ast.literal_eval(cf[section][series]["ERUsingLloydTaylor"]["drivers"])
target = cf[section][series]["ERUsingLloydTaylor"]["target"]
for label in driver_list:
data,flag,attr = qcutils.GetSeriesasMA(ds,label)
if numpy.ma.count_masked(data)!=0:
logger.error("ERUsingLloydTaylor: driver "+label+" contains missing data, skipping target "+target)
return
# create the dictionary keys for this series
rpLT_info = {}
# site name
rpLT_info["site_name"] = ds.globalattributes["site_name"]
# source series for ER
opt = qcutils.get_keyvaluefromcf(cf, [section,series,"ERUsingLloydTaylor"], "source", default="Fc")
rpLT_info["source"] = opt
# target series name
rpLT_info["target"] = cf[section][series]["ERUsingLloydTaylor"]["target"]
# list of drivers
rpLT_info["drivers"] = ast.literal_eval(cf[section][series]["ERUsingLloydTaylor"]["drivers"])
# name of SOLO output series in ds
rpLT_info["output"] = cf[section][series]["ERUsingLloydTaylor"]["output"]
# results of best fit for plotting later on
rpLT_info["results"] = {"startdate":[],"enddate":[],"No. points":[],"r":[],
"Bias":[],"RMSE":[],"Frac Bias":[],"NMSE":[],
"Avg (obs)":[],"Avg (LT)":[],
"Var (obs)":[],"Var (LT)":[],"Var ratio":[],
"m_ols":[],"b_ols":[]}
# create the configuration dictionary
rpLT_info["configs_dict"] = get_configs_dict(cf,ds)
# create an empty series in ds if the output series doesn't exist yet
if rpLT_info["output"] not in ds.series.keys():
data,flag,attr = qcutils.MakeEmptySeries(ds,rpLT_info["output"])
qcutils.CreateSeries(ds,rpLT_info["output"],data,flag,attr)
# create the merge directory in the data structure
if "merge" not in dir(ds): ds.merge = {}
if "standard" not in ds.merge.keys(): ds.merge["standard"] = {}
# create the dictionary keys for this series
ds.merge["standard"][series] = {}
# output series name
ds.merge["standard"][series]["output"] = series
# source
ds.merge["standard"][series]["source"] = ast.literal_eval(cf[section][series]["MergeSeries"]["Source"])
# create an empty series in ds if the output series doesn't exist yet
if ds.merge["standard"][series]["output"] not in ds.series.keys():
data,flag,attr = qcutils.MakeEmptySeries(ds,ds.merge["standard"][series]["output"])
qcutils.CreateSeries(ds,ds.merge["standard"][series]["output"],data,flag,attr)
return rpLT_info
def rpLT_initplot(**kwargs):
# set the margins, heights, widths etc
pd = {"margin_bottom":0.075,"margin_top":0.075,"margin_left":0.05,"margin_right":0.05,
"xy_height":0.20,"xy_width":0.20,"xyts_space":0.05,"xyts_space":0.05,
"ts_width":0.9}
# set the keyword arguments
for key, value in kwargs.iteritems():
pd[key] = value
# calculate bottom of the first time series and the height of the time series plots
pd["ts_bottom"] = pd["margin_bottom"]+pd["xy_height"]+pd["xyts_space"]
pd["ts_height"] = (1.0 - pd["margin_top"] - pd["ts_bottom"])/float(pd["nDrivers"]+1)
return pd
def rpLT_plot(pd,ds,series,driverlist,targetlabel,outputlabel,LT_info,si=0,ei=-1):
""" Plot the results of the Lloyd-Taylor run. """
# get the time step
ts = int(ds.globalattributes['time_step'])
# get a local copy of the datetime series
if ei==-1:
dt = ds.series['DateTime']['Data'][si:]
else:
dt = ds.series['DateTime']['Data'][si:ei+1]
xdt = numpy.array(dt)
Hdh,f,a = qcutils.GetSeriesasMA(ds,'Hdh',si=si,ei=ei)
# get the observed and modelled values
obs,f,a = qcutils.GetSeriesasMA(ds,targetlabel,si=si,ei=ei)
mod,f,a = qcutils.GetSeriesasMA(ds,outputlabel,si=si,ei=ei)
# make the figure
if LT_info["show_plots"]:
plt.ion()
else:
plt.ioff()
fig = plt.figure(pd["fig_num"],figsize=(13,8))
fig.clf()
fig.canvas.set_window_title(targetlabel+" (LT): "+pd["startdate"]+" to "+pd["enddate"])
plt.figtext(0.5,0.95,pd["title"],ha='center',size=16)
# XY plot of the diurnal variation
rect1 = [0.10,pd["margin_bottom"],pd["xy_width"],pd["xy_height"]]
ax1 = plt.axes(rect1)
# get the diurnal stats of the observations
mask = numpy.ma.mask_or(obs.mask,mod.mask)
obs_mor = numpy.ma.array(obs,mask=mask)
dstats = qcutils.get_diurnalstats(dt,obs_mor,LT_info)
ax1.plot(dstats["Hr"],dstats["Av"],'b-',label="Obs")
# get the diurnal stats of all SOLO predictions
dstats = qcutils.get_diurnalstats(dt,mod,LT_info)
ax1.plot(dstats["Hr"],dstats["Av"],'r-',label="LT(all)")
mod_mor = numpy.ma.masked_where(numpy.ma.getmaskarray(obs)==True,mod,copy=True)
dstats = qcutils.get_diurnalstats(dt,mod_mor,LT_info)
ax1.plot(dstats["Hr"],dstats["Av"],'g-',label="LT(obs)")
plt.xlim(0,24)
plt.xticks([0,6,12,18,24])
ax1.set_ylabel(targetlabel)
ax1.set_xlabel('Hour')
ax1.legend(loc='upper right',frameon=False,prop={'size':8})
# XY plot of the 30 minute data
rect2 = [0.40,pd["margin_bottom"],pd["xy_width"],pd["xy_height"]]
ax2 = plt.axes(rect2)
ax2.plot(mod,obs,'b.')
ax2.set_ylabel(targetlabel+'_obs')
ax2.set_xlabel(targetlabel+'_LT')
# plot the best fit line
coefs = numpy.ma.polyfit(numpy.ma.copy(mod),numpy.ma.copy(obs),1)
    xfit = numpy.ma.array([numpy.ma.minimum(mod), numpy.ma.maximum(mod)])
###############################################################################
#
# Project: Embedded Learning Library (ELL)
#  File:     cntk_to_ell_importer_test.py (importers)
# Authors: <NAME>, <NAME>, <NAME>
#
# Requires: Python 3.x, cntk-2.4
#
###############################################################################
import os
script_path = os.path.dirname(os.path.abspath(__file__))
SkipFullModelTests = False
import unittest
import sys
import numpy as np
sys.path.append(os.path.join(script_path, '../../../utilities/pythonlibs'))
sys.path.append(os.path.join(script_path, '../../../utilities/pythonlibs/vision'))
sys.path.append(os.path.join(script_path, '../../common/test'))
sys.path.append(os.path.join(script_path, '../..'))
sys.path.append(os.path.join(script_path, '..'))
from cntk.layers import Convolution, MaxPooling, Dense
from cntk import constant, param_relu, load_model
from cntk.ops import *
from itertools import product
from download_helper import *
import find_ell
import ell
import logger
import common_importer_test
import cntk_to_ell
import lib.cntk_converters as cntk_converters
import lib.cntk_layers as cntk_layers
import lib.cntk_utilities as cntk_utilities
import cntk_import
from custom_functions import BinaryConvolution, CustomSign
def BatchNormalizationTester(map_rank=1,
init_scale=1,
init_bias=0,
normalization_time_constant=5000,
blend_time_constant=0,
epsilon=0.00001,
use_cntk_engine=True,
norm_shape=(),
init_mean=None,
init_variance=None,
name=''):
"""Instantiates a batch normalization layer for testing purposes, where mean
and variance can be set.
"""
# parameters bound to this Function
scale = parameter(shape=norm_shape, init=init_scale, name='scale')
bias = parameter(shape=norm_shape, init=init_bias, name='bias')
run_mean = constant(shape=norm_shape, value=init_mean,
name='aggregate_mean')
run_variance = constant(
shape=norm_shape, value=init_variance, name='aggregate_variance')
run_count = constant(0, shape=(), name='aggregate_count')
# expression
def batch_normalize(x):
return batch_normalization(
x, scale, bias, run_mean, run_variance, running_count=run_count,
spatial=map_rank == 1,
normalization_time_constant=normalization_time_constant,
blend_time_constant=blend_time_constant, epsilon=epsilon,
use_cudnn_engine=not use_cntk_engine)
return batch_normalize
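# Illustrative usage sketch (example shapes only, not from the original
# tests): fixing the running statistics so the layer's output is fully
# deterministic and can be checked by hand.
def _demo_batch_norm_tester():
    channels = 4
    bn = BatchNormalizationTester(norm_shape=(channels,),
                                  init_mean=np.zeros(channels, dtype=np.float32),
                                  init_variance=np.ones(channels, dtype=np.float32))
    x = input((channels, 3, 3))  # CNTK order: channels, rows, columns
    return bn(x)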
def compare_predictor_output(modelFile, labels, modelTestInput=None,
maxLayers=None):
"""Compares an ell.NeuralNetworkPredictor against its equivalent CNTK
model.
Parameters:
modelFile -- path to the CNTK model file
labels -- array of labels
modelTestInput -- input data in row, column, channel ordering
maxLayers -- integer to indicate how many layers to run before stopping.
Setting to None will run all layers and compare against the
original model.
"""
_logger = logger.get()
z = load_model(modelFile)
modelLayers = cntk_utilities.get_model_layers(z)
# Get the relevant CNTK layers that we will convert to ELL
layersToConvert = cntk_layers.get_filtered_layers_list(
modelLayers, maxLayers)
if not layersToConvert:
raise RuntimeError("No layers are converted, nothing to test")
# Create a list of ELL layers from the relevant CNTK layers
_logger.info("\nCreating ELL predictor...")
ellLayers = cntk_layers.convert_cntk_layers_to_ell_layers(
layersToConvert)
# Create an ELL neural network predictor from the relevant CNTK layers
predictor = ell.neural.NeuralNetworkPredictor(ellLayers)
if not modelTestInput:
inputShape = predictor.GetInputShape()
modelTestInput = np.random.uniform(
low=0, high=255, size=(
inputShape.rows, inputShape.columns, inputShape.channels)
).astype(np.float_)
ellTestInput = modelTestInput.ravel() # rows, columns, channels
ellResults = predictor.Predict(ellTestInput)
# rows, columns, channels => channels, rows, columns
cntkTestInput = np.moveaxis(modelTestInput, -1, 0).astype(np.float32)
cntkTestInput = np.ascontiguousarray(cntkTestInput)
# Get the equivalent CNTK model
if not maxLayers:
_logger.info("\nRunning original CNTK model...")
_, out = z.forward(
{z.arguments[0]: [cntkTestInput],
z.arguments[1]: [list(range(len(labels)))]})
for output in z.outputs:
if (output.shape == (len(labels),)):
out = out[output]
cntkResults = softmax(out[0]).eval()
# For the full model, we compare prediction output instead of layers
np.testing.assert_array_almost_equal(
cntkResults, ellResults, 5,
'prediction outputs do not match! (for model ' + modelFile + ')')
else:
_logger.info("\nRunning partial CNTK model...")
if (layersToConvert[-1].layer.op_name == 'CrossEntropyWithSoftmax' and
len(layersToConvert) > 2):
# ugly hack for CrossEntropyWithSoftmax
zz = as_composite(layersToConvert[-2].layer)
zz = softmax(zz)
else:
zz = as_composite(layersToConvert[-1].layer)
zz = softmax(zz)
out = zz(cntkTestInput)
orderedCntkModelResults = cntk_converters.\
get_vector_from_cntk_array(out)
np.testing.assert_array_almost_equal(
orderedCntkModelResults, ellResults, 5,
('prediction outputs do not match! (for partial model ' +
modelFile + ')'))
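# Illustrative call sketch (hypothetical model file and label list, not part
# of the original test suite):
def _demo_compare_predictor_output():
    labels = ['class_%d' % i for i in range(1000)]
    compare_predictor_output('example_model.cntk', labels, maxLayers=None)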
class CntkLayersTestCase(common_importer_test.EllImporterTestBase):
def verify_compiled(self, predictor, input, expectedOutput, module_name,
method_name, precision=5):
map = ell.neural.utilities.ell_map_from_predictor(predictor)
# Note: for testing purposes, callback functions assume the "model" namespace
compile_options = ell.model.MapCompilerOptions()
compile_options.useBlas = False
compiled = map.Compile("host", "model", method_name, compilerOptions=compile_options)
compiledResults = compiled.Compute(input)
# Compare compiled results
if precision > 0:
np.testing.assert_array_almost_equal(
expectedOutput, compiledResults, precision,
'results for %s layer do not match ELL compiled output !' %
(module_name))
else:
np.testing.assert_array_equal(
expectedOutput, compiledResults,
'results for %s layer do not match ELL compiled output !' %
(module_name))
def test_dense_layer(self):
"""Test a model with a single CNTK Dense layer against the equivalent
ELL predictor. This verifies that the import functions reshape and
reorder values appropriately and that the equivalent ELL layer
produces comparable output
"""
# Create a Dense CNTK layer with no bias or activation
denseLayer = Dense(5, bias=False)
x = input((2, 3, 4)) # Input order for CNTK is channels, rows, columns
cntkModel = denseLayer(x)
# Create a test set of weights to use for both CNTK and ELL layers
# CNTK has these in channels, rows, columns, [output shape] order
weightValues = np.arange(120, dtype=np.float_).reshape(2, 3, 4, 5)
# Set the weights
denseLayer.parameters[0].value = weightValues
# create an ELL Tensor from the cntk weights, which re-orders the
# weights and produces an appropriately dimensioned tensor
weightTensor = cntk_converters.\
get_tensor_from_cntk_dense_weight_parameter(
denseLayer.parameters[0])
# Create the equivalent ELL predictor
layerParameters = ell.neural.LayerParameters(
# Input order for ELL is rows, columns, channels
ell.math.TensorShape(3, 4, 2),
ell.neural.NoPadding(),
ell.math.TensorShape(1, 1, 5),
ell.neural.NoPadding(),
ell.nodes.PortType.smallReal)
layer = ell.neural.FullyConnectedLayer(layerParameters, weightTensor)
predictor = ell.neural.NeuralNetworkPredictor([layer])
# Get the results for both
inputValues = np.arange(24, dtype=np.float32).reshape(2, 3, 4)
orderedInputValues = cntk_converters.get_vector_from_cntk_array(
inputValues)
cntkResults = cntkModel(inputValues)
orderedCntkResults = cntk_converters.get_vector_from_cntk_array(
cntkResults)
ellResults = predictor.Predict(orderedInputValues)
# Compare the results
np.testing.assert_array_equal(
orderedCntkResults, ellResults,
'results for Dense layer do not match!')
# now run same over ELL compiled model
self.verify_compiled(predictor, orderedInputValues, orderedCntkResults,
"dense", "test")
def test_max_pooling_layer(self):
"""Test a model with a single CNTK MaxPooling layer against the
equivalent ELL predictor. This verifies that the import functions
reshape and reorder values appropriately and that the equivalent ELL
layer produces comparable output
"""
_logger = logger.get()
x = input((2, 15, 15))
count = 0
inputValues = np.random.uniform(
low=-5, high=5, size=(2, 15, 15)).astype(np.float32)
for pool_size, stride_size in product(range(2, 4), range(2, 3)):
count += 1
_logger.info("test pooling size ({0},{0}) and stride {1}".format(
pool_size, stride_size))
# Create a MaxPooling CNTK layer
poolingLayer = MaxPooling(
(pool_size, pool_size), pad=True, strides=stride_size)
# Input order for CNTK is channels, rows, columns
cntkModel = poolingLayer(x)
# Get the results for both
cntkResults = cntkModel(inputValues)[0]
outputShape = cntkResults.shape
padding = int((pool_size - 1) / 2)
rows = int(inputValues.shape[1] + 2 * padding)
columns = int(inputValues.shape[2] + 2 * padding)
channels = int(inputValues.shape[0])
# Create the equivalent ELL predictor
layerParameters = ell.neural.LayerParameters(
# Input order for ELL is rows, columns, channels
ell.math.TensorShape(rows, columns, channels),
ell.neural.MinPadding(padding),
ell.math.TensorShape(
outputShape[1], outputShape[2], outputShape[0]),
ell.neural.NoPadding(),
ell.nodes.PortType.smallReal)
poolingParameters = ell.neural.PoolingParameters(
pool_size, stride_size)
layer = ell.neural.PoolingLayer(
layerParameters, poolingParameters, ell.neural.PoolingType.max)
predictor = ell.neural.NeuralNetworkPredictor([layer])
# Note that cntk inserts an extra dimension of 1 in the front
orderedCntkResults = cntk_converters.\
get_vector_from_cntk_array(cntkResults)
orderedInputValues = cntk_converters.\
get_vector_from_cntk_array(inputValues)
ellResults = predictor.Predict(orderedInputValues)
# Compare them
np.testing.assert_array_almost_equal(
orderedCntkResults, ellResults, 5,
('results for MaxPooling layer do not match! (poolsize = '
'{}, stride = {}').format(pool_size, stride_size))
# now run same over ELL compiled model
self.verify_compiled(
predictor, orderedInputValues, orderedCntkResults,
'max_pooling{}_{}'.format(pool_size, stride_size),
'test_' + str(count))
def test_convolution_layer(self):
"""Test a model with a single CNTK Convolution layer against the
equivalent ELL predictor. This verifies that the import functions
reshape and reorder values appropriately and that the equivalent ELL
layer produces comparable output
"""
# Create a Convolution CNTK layer with no bias or activation,
# auto-padding, stride of 1
convolutionLayer = Convolution((3, 3), 5, pad=(
True, True), strides=1, bias=False, init=0)
x = input((2, 3, 4)) # Input order for CNTK is channels, rows, columns
cntkModel = convolutionLayer(x)
# Create a test set of weights to use for both CNTK and ELL layers
# CNTK has these in filters, channels, rows, columns order
        weightValues = np.arange(90, dtype=np.float_).reshape(5, 2, 3, 3)
import os
import sys
import pdb
import glob
import json
import time
import re
import warnings
import numpy as np
os.environ['TF_CUDNN_USE_AUTOTUNE']='0'
import tensorflow as tf
import functions_graph_assembly as fga
import functions_evaluation
def run_training_routine(
train_regex,
num_epochs=1,
batch_size=8,
display_step=5,
save_step=10000,
output_directory="/saved_models/example_arch",
brain_net_ckpt_to_load=None,
frontend_ckpt_to_load=None,
controller="/cpu:0",
iterator_device="/cpu:0",
max_runtime=None,
random_seed=517,
signal_rate=20000,
TASK_LOSS_PARAMS={},
N_CLASSES_DICT={},
ITERATOR_PARAMS={},
FRONTEND_PARAMS={},
COCH_PARAMS={},
BRAIN_PARAMS={},
NORMAL_HEARING_PARAMS={},
OPTM_PARAMS={},
valid_regex=None,
valid_step=10000,
valid_display_step=100,
early_stopping_metrics=None,
early_stopping_baselines=None,
load_iterator=False,
save_iterator=True,
**kwargs):
'''
This function runs the multi-tower training routine (brain network)
Args
----
train_regex (str): regex that globs .tfrecords files for training dataset
num_epochs (int): number of times to repeat the dataset
batch_size (int): number of examples per batch per GPU
display_step (int): print out training info every display_step steps
save_step (int): checkpoint trainable variables every save_step steps
output_directory (str): location to save the new checkpoints model parameters
brain_net_ckpt_to_load (str): path to brain_network .ckpt-# to load, None starts training from most recent checkpoint
frontend_ckpt_to_load (str): path to frontend .ckpt-# to load, None starts training from most recent checkpoint
controller (str): device to save the variables and consolidate gradients (GPU is more efficient on summit)
iterator_device (str): device that hosts the input iterator (to use tf.Dataset API, must be a CPU)
max_runtime (int): maximum time (in seconds) to run training before final checkpoint (no limit if set to None or 0)
random_seed (int): random seed to set tensorflow and numpy
signal_rate (int): sampling rate of input signal (Hz)
TASK_LOSS_PARAMS (dict): dictionary containing the loss parameters for each task, keys are the task paths
N_CLASSES_DICT (dict): dictionary specifying number of output classes for each task
ITERATOR_PARAMS (dict): parameters for building the input data iterator
FRONTEND_PARAMS (dict): parameters for building the frontend model graph
COCH_PARAMS (dict): parameters for building the cochlear model
BRAIN_PARAMS (dict): parameters for building the brain network
NORMAL_HEARING_PARAMS (dict): contains parameters for the "normal" hearing network, if matching on layer activations
valid_regex (str): regex that globs .tfrecords files for validation dataset (if None, no validation)
valid_step (int): number of training steps after which to run validation procedure (if <= 0, no validation)
valid_display (int): print out validation procedure info every valid_display_step steps
early_stopping_metrics (dict): metric name and minimum delta pairs for early stopping (see functions_evaluation.py)
early_stopping_baselines (dict): baseline values for the early stopping metrics to reach (see functions_evaluation.py)
load_iterator (bool): set to False to prevent training routine from loading iterator checkpoint
save_iterator (bool): set to False to prevent training routine from building iterator saver (cant save or load iterator)
'''
### RESET DEFAULT GRAPH AND SET RANDOM SEEDS ###
tf.reset_default_graph()
tf.random.set_random_seed(random_seed)
    np.random.seed(random_seed)
import os
import random
import pickle
from math import log, e
import gensim
import scipy.io as sio
import numpy as np
class HOIClass:
def __init__(self, object_name, verb_name, hoi_id):
self._object_name = object_name
self._verb_name = verb_name
self._hoi_id = hoi_id
def object_name(self):
return self._object_name
def verb_name(self):
return self._verb_name
def hoi_name(self):
return self._verb_name + ' ' + self._object_name
def object_class_mapping(hoi_classes, hoi_obj_classes):
hoi_range = [(161, 170), (11, 24), (66, 76), (147, 160), (1, 10), (55, 65), (187, 194), (568, 576), (32, 46),
(563, 567), (326, 330), (503, 506), (415, 418), (244, 247), (25, 31), (77, 86), (112, 129), (130, 146),
(175, 186), (97, 107), (314, 325), (236, 239), (596, 600), (343, 348), (209, 214), (577, 584),
(353, 356), (539, 546), (507, 516), (337, 342), (464, 474), (475, 483), (489, 502), (369, 376),
(225, 232), (233, 235), (454, 463), (517, 528), (534, 538), (47, 54), (589, 595), (296, 305),
(331, 336), (377, 383), (484, 488), (253, 257), (215, 224), (199, 208), (439, 445), (398, 407),
(258, 264), (274, 283), (357, 363), (419, 429), (306, 313), (265, 273), (87, 92), (93, 96), (171, 174),
(240, 243), (108, 111), (551, 558), (195, 198), (384, 389), (394, 397), (435, 438), (364, 368),
(284, 290), (390, 393), (408, 414), (547, 550), (450, 453), (430, 434), (248, 252), (291, 295),
(585, 588), (446, 449), (529, 533), (349, 352), (559, 562)]
hoi_obj2ind = dict(zip(hoi_obj_classes, xrange(len(hoi_obj_classes))))
    det_obj_classes = [hoi_classes[rng[0] - 1].object_name() for rng in hoi_range]
det_obj2hoi_obj = {}
for i in range(len(det_obj_classes)):
obj_name = det_obj_classes[i]
det_obj_ind = i+1
hoi_obj_ind = hoi_obj2ind[obj_name]
det_obj2hoi_obj[det_obj_ind] = hoi_obj_ind
return det_obj2hoi_obj
def load_object_word2vec(object_classes, w2v_path, save_dir):
print('Loading obj2vec ...')
obj2vec_path = os.path.join(save_dir, 'hico_obj2vec.pkl')
if os.path.exists(obj2vec_path):
        with open(obj2vec_path, 'rb') as f:
obj2vec = pickle.load(f)
return obj2vec
# load pretrained word2vec
model = gensim.models.KeyedVectors.load_word2vec_format(w2v_path, binary=True)
obj2vec = np.zeros((len(object_classes), 300))
for i, obj_class in enumerate(object_classes):
obj_class_clean = obj_class
if obj_class == 'dining_table':
obj_class_clean = 'table'
elif obj_class == 'baseball_bat':
obj_class_clean = 'bat'
elif obj_class == 'baseball_glove':
obj_class_clean = 'glove'
elif obj_class == 'hair_drier':
obj_class_clean = 'drier'
elif obj_class == 'potted_plant':
obj_class_clean = 'plant'
elif obj_class == 'cell_phone':
obj_class_clean = 'phone'
elif obj_class == 'fire_hydrant':
obj_class_clean = 'hydrant'
elif obj_class == 'hot_dog':
obj_class_clean = 'bread'
elif obj_class == 'parking_meter':
obj_class_clean = 'meter'
elif obj_class == 'sports_ball':
obj_class_clean = 'ball'
elif obj_class == 'stop_sign':
obj_class_clean = 'sign'
elif obj_class == 'teddy_bear':
obj_class_clean = 'toy'
elif obj_class == 'tennis_racket':
obj_class_clean = 'racket'
elif obj_class == 'traffic_light':
obj_class_clean = 'light'
elif obj_class == 'wine_glass':
obj_class_clean = 'glass'
vec = model[obj_class_clean]
if vec is None or len(vec) == 0 or np.sum(vec) == 0:
print('[WARNING] %s' % obj_class)
obj2vec[i] = vec
with open(obj2vec_path, 'wb') as f:
pickle.dump(obj2vec, f)
return obj2vec
def load_hoi_classes(hoi_class_path):
hoi_cls_list = []
obj_cls_list = []
vrb_cls_list = []
    with open(hoi_class_path, 'rb') as f:
mat_hoi_classes = pickle.load(f)
for hoi_cls_id, hoi_cls in enumerate(mat_hoi_classes):
obj_cls_name = hoi_cls.split(' ')[1]
if obj_cls_name not in obj_cls_list:
obj_cls_list.append(obj_cls_name)
vrb_cls_name = hoi_cls.split(' ')[0]
if vrb_cls_name not in vrb_cls_list:
vrb_cls_list.append(vrb_cls_name)
hoi_cls_list.append(HOIClass(obj_cls_name, vrb_cls_name, hoi_cls_id))
hoi2int = [[] for _ in range(len(hoi_cls_list))]
curr_hoi_stt = 0
curr_obj = hoi_cls_list[0].object_name()
for i in range(1, len(hoi_cls_list)):
hoi = hoi_cls_list[i]
if hoi.object_name() != curr_obj:
# last interval ended
curr_hoi_end = i - 1
for j in range(curr_hoi_stt, curr_hoi_end + 1):
hoi2int[j] = [curr_hoi_stt, curr_hoi_end]
curr_hoi_stt = i
curr_obj = hoi.object_name()
curr_hoi_end = len(hoi_cls_list) - 1
for j in range(curr_hoi_stt, curr_hoi_end + 1):
hoi2int[j] = [curr_hoi_stt, curr_hoi_end]
# obj2int = [[] for _ in range(len(obj_cls_list))]
# curr_obj = hoi_cls_list[0].object_name()
# curr_int_stt = 0
# curr_obj_ind = 0
# for i in range(1, len(hoi_cls_list)):
# obj = hoi_cls_list[i].object_name()
# if obj != curr_obj:
# curr_int_end = i - 1
# assert curr_obj == obj_cls_list[curr_obj_ind]
# obj2int[curr_obj_ind] = [curr_int_stt, curr_int_end]
# curr_int_stt = i
# curr_obj = obj
# curr_obj_ind += 1
# obj2int[curr_obj_ind] = [curr_int_stt, len(hoi_cls_list) - 1]
return hoi_cls_list, obj_cls_list, vrb_cls_list, hoi2int
def load_image_info(anno_path, save_dir, image_set='train'):
print('Loading image set info ...')
save_path = os.path.join(save_dir, 'hico_image_info_%s.pkl' % image_set)
if os.path.exists(save_path):
        with open(save_path, 'rb') as f:
all_image_info = pickle.load(f)
return all_image_info
all_image_info = {}
mat_anno_db = sio.loadmat(anno_path)
mat_anno_db = mat_anno_db['bbox_%s' % image_set]
for mat_anno in mat_anno_db[0, :]:
image_id = mat_anno['filename'][0].split('.')[0]
image_id = int(image_id[-8:])
all_image_info[image_id] = [mat_anno['size']['width'][0, 0][0, 0], mat_anno['size']['height'][0, 0][0, 0]]
with open(save_path, 'wb') as f:
pickle.dump(all_image_info, f)
return all_image_info
def extract_spatial_feature(box1, box2, image_size):
img_w, img_h = image_size
img_w = float(img_w)
img_h = float(img_h)
sbj_h = box1['ymax'] - box1['ymin'] + 1
sbj_w = box1['xmax'] - box1['xmin'] + 1
obj_h = box2['ymax'] - box2['ymin'] + 1
obj_w = box2['xmax'] - box2['xmin'] + 1
spatial_feat = [
box1['xmin'] * 1.0 / img_w,
box1['ymin'] * 1.0 / img_h,
box1['xmax'] * 1.0 / img_w,
box1['ymax'] * 1.0 / img_h,
(sbj_h * sbj_w * 1.0) / (img_h * img_w),
box2['xmin'] * 1.0 / img_w,
box2['ymin'] * 1.0 / img_h,
box2['xmax'] * 1.0 / img_w,
box2['ymax'] * 1.0 / img_h,
(obj_h * obj_w * 1.0) / (img_h * img_w),
(box1['xmin'] - box2['xmin'] + 1) / (obj_w * 1.0),
(box1['ymin'] - box2['ymin'] + 1) / (obj_h * 1.0),
log(sbj_w * 1.0 / obj_w, e),
log(sbj_h * 1.0 / obj_h, e)]
return spatial_feat
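# Illustrative example (made-up boxes and image size): the returned list has
# 14 entries -- the two normalised boxes, their relative areas, the offsets of
# box1 w.r.t. box2, and the log width/height ratios.
def _demo_extract_spatial_feature():
    hbox = {'xmin': 100.0, 'ymin': 50.0, 'xmax': 200.0, 'ymax': 250.0}
    obox = {'xmin': 180.0, 'ymin': 150.0, 'xmax': 300.0, 'ymax': 260.0}
    return extract_spatial_feature(hbox, obox, (640, 480))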
def prepare_hico(hico_root, save_dir):
hoi_db_path = os.path.join(save_dir, 'hico_trainval_anno.pkl')
if not os.path.exists(save_dir):
os.mkdir(save_dir)
if os.path.exists(hoi_db_path):
print('Loading annotations ...')
        with open(hoi_db_path, 'rb') as f:
hoi_db = pickle.load(f)
return hoi_db
image_info_path = os.path.join(hico_root, 'anno_bbox_full.mat')
image_info = load_image_info(image_info_path, save_dir)
hoi_class_path = os.path.join(hico_root, 'hoi_categories.pkl')
hoi_cates, obj_cates, vrb_cates, _ = load_hoi_classes(hoi_class_path)
obj2ind = dict(zip(obj_cates, xrange(len(obj_cates))))
hoi_class_num = len(hoi_cates)
obj2vec = load_object_word2vec(obj_cates, 'GoogleNews-vectors-negative300.bin', save_dir)
print('Loading annotations ...')
anno_gt_path = os.path.join(hico_root, 'train_GT_HICO_with_pose.pkl')
anno_ng_path = os.path.join(hico_root, 'train_NG_HICO_with_pose.pkl')
    anno_gt = pickle.load(open(anno_gt_path, 'rb'))
    anno_ng = pickle.load(open(anno_ng_path, 'rb'))
hboxes = []
oboxes = []
spa_feats = []
hoi_classes = []
bin_classes = []
obj_classes = []
skeletons = []
print('Processing annotations ...')
anno_gt_db = {}
for hoi_ins_gt in anno_gt:
image_id = hoi_ins_gt[0]
if image_id in anno_gt_db:
anno_gt_db[image_id].append(hoi_ins_gt)
else:
anno_gt_db[image_id] = [hoi_ins_gt]
for image_id, img_pos_hois in anno_gt_db.items():
image_size = image_info[image_id]
if image_size[0] == 0 or image_size[1] == 0:
print(image_id)
if image_id in anno_ng and len(anno_ng[image_id]) > 0:
img_neg_hois0 = anno_ng[image_id]
if len(img_neg_hois0) > len(img_pos_hois):
inds = random.sample(range(len(img_neg_hois0)), len(img_pos_hois))
else:
inds = []
for i in range(int(len(img_pos_hois) / len(img_neg_hois0))):
inds += range(len(img_neg_hois0))
for i in range(len(img_pos_hois) - len(inds)):
inds.append(i)
img_neg_hois = [img_neg_hois0[ind] for ind in inds]
assert len(img_neg_hois) == len(img_pos_hois)
else:
img_neg_hois = []
for pn, hois in enumerate([img_pos_hois, img_neg_hois]):
for raw_hoi in hois:
hbox = raw_hoi[2]
obox = raw_hoi[3]
bin_class = pn # pos: 0; neg: 1
hoi_class_ids = raw_hoi[1]
if isinstance(hoi_class_ids, int):
hoi_class_ids = [hoi_class_ids]
obj_class = obj2ind[hoi_cates[hoi_class_ids[0]].object_name()]
hoi_class = [0] * hoi_class_num
if pn == 0:
skeleton = raw_hoi[5]
else:
skeleton = raw_hoi[7]
for id in hoi_class_ids:
hoi_class[id] = 1
hbox_tmp = {
'xmin': float(hbox[0]),
'ymin': float(hbox[1]),
'xmax': float(hbox[2]),
'ymax': float(hbox[3]),
}
obox_tmp = {
'xmin': float(obox[0]),
'ymin': float(obox[1]),
'xmax': float(obox[2]),
'ymax': float(obox[3]),
}
spa_feat = extract_spatial_feature(hbox_tmp, obox_tmp, image_size)
spa_feats.append(spa_feat)
hboxes.append(hbox)
oboxes.append(obox)
obj_classes.append(obj_class)
hoi_classes.append(hoi_class)
bin_classes.append(bin_class)
skeletons.append(skeleton)
num_item = len(hboxes)
num_train = int(num_item * 0.7)
train_db = {
'obj2vec': obj2vec,
'hboxes': np.array(hboxes[:num_train]),
'oboxes': np.array(oboxes[:num_train]),
'spa_feats': np.array(spa_feats[:num_train]),
'obj_classes': np.array(obj_classes[:num_train]),
        'hoi_classes': np.array(hoi_classes[:num_train]),
import os
import tempfile
import numpy as np
import scipy.ndimage.measurements as meas
from functools import reduce
import warnings
import sys
sys.path.append(os.path.abspath(r'../lib'))
import NumCppPy as NumCpp # noqa E402
####################################################################################
def factors(n):
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))
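# Quick sanity example (not part of the original tests):
# factors(12) -> {1, 2, 3, 4, 6, 12}
def _demo_factors():
    assert factors(12) == {1, 2, 3, 4, 6, 12}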
####################################################################################
def test_seed():
np.random.seed(1)
####################################################################################
def test_abs():
randValue = np.random.randint(-100, -1, [1, ]).astype(np.double).item()
assert NumCpp.absScaler(randValue) == np.abs(randValue)
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.absScaler(value), 9) == np.round(np.abs(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.absArray(cArray), np.abs(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.absArray(cArray), 9), np.round(np.abs(data), 9))
####################################################################################
def test_add():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
####################################################################################
def test_alen():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.alen(cArray) == shape.rows
####################################################################################
def test_all():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
####################################################################################
def test_allclose():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
tolerance = 1e-5
data1 = np.random.randn(shape.rows, shape.cols)
data2 = data1 + tolerance / 10
data3 = data1 + 1
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
assert NumCpp.allclose(cArray1, cArray2, tolerance) and not NumCpp.allclose(cArray1, cArray3, tolerance)
####################################################################################
def test_amax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
####################################################################################
def test_amin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
    assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# #
# ----- POST-PROCESSING OF HDF FILES: APPENDING DATA OF ----- #
# LOCAL CELL DENSITY, NUCLEUS SIZE & DNA CONTENT #
# FROM FLUORESCENCE SIGNAL INTENSITY #
# #
# ----- Creator: <NAME> ----- #
# #
# ----- Last updated: 31th Jan 2020 ----- #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import h5py
import numpy as np
from PostProcessing_HDF_Data.Local_Density_Nucleus_Size_Fluo_Signal_Smooth_Class import Smooth_or_Scale_Raw_Data
def CreateHDF5Dataset_Density(hdf5_file):
""" Create an array which you will append to the existing 'LBEPR' table.
shape = len(lbepr, 3); [child_1, child_2, generation]
"""
# Call the classes & their relevant functions:
call = Smooth_or_Scale_Raw_Data(hdf5_file=hdf5_file)
raw_density, smooth_density = call.Smooth_Data(which_data="density")
raw_nucleus, smooth_nucleus = call.Smooth_Data(which_data="nucleus")
raw_fsignal, smooth_fsignal = call.Smooth_Data(which_data="fsignal")
raw_scaled_fluo = call.Scale_Fluo_Signal_Interphase(which_data="raw")
smooth_scaled_fluo = call.Scale_Fluo_Signal_Interphase(which_data="smooth")
    # Check lengths (a chained != comparison only checks adjacent pairs, so
    # collect all lengths and require a single unique value):
    lengths = {len(raw_density), len(smooth_density), len(raw_nucleus),
               len(smooth_nucleus), len(raw_fsignal), len(smooth_fsignal),
               len(raw_scaled_fluo), len(smooth_scaled_fluo)}
    if len(lengths) > 1:
        raise ValueError("Data of different lengths!")
# Create a list of lists with data, return in np.array form:
# TODO: Do this better! Transpose?
maxi_list = []
for a, b, c, d, e, f, g, h in zip(raw_density, smooth_density,
raw_nucleus, smooth_nucleus,
raw_fsignal, smooth_fsignal,
raw_scaled_fluo, smooth_scaled_fluo):
lst = [a, b, c, d, e, f, g, h]
maxi_list.append(lst)
    data = np.array(maxi_list, dtype=np.float64)
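    # NOTE (illustrative, addressing the TODO above): the same table can be
    # built without the explicit zip loop, e.g.
    #   data = np.column_stack([raw_density, smooth_density, raw_nucleus,
    #                           smooth_nucleus, raw_fsignal, smooth_fsignal,
    #                           raw_scaled_fluo, smooth_scaled_fluo]).astype(np.float64)
    return data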
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
import os, itertools,copy
import numpy as np
from scipy import sparse
from scipy.spatial.distance import cdist
from ase.neighborlist import NeighborList,natural_cutoffs
from ase.data import atomic_masses_iupac2016,atomic_numbers
from pymatgen.symmetry.analyzer import PointGroupAnalyzer as pga
from pymove import Structure
from pymove.io import read,write,output_geo
"""
It will be better overall to break these complex operations into classes
which will be easier to use and have a more intuitive API.
"""
def get_molecules(struct, mult=1.05):
"""
Arguments
---------
mult: float
Multiplicative factor to use for natural_cutoffs
Returns
-------
List of Structure objects for each molecule identified using the smallest
molecule representation.
"""
molecule_list = find_molecules(struct, mult=mult)
molecule_struct_list = extract_molecules(struct, molecule_list,
mult=mult)
if len(molecule_struct_list) == 0:
raise Exception("No molecules found for structure {}."
.format(struct.struct_id))
return molecule_struct_list
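# Illustrative usage sketch (hypothetical file name): splitting a periodic
# structure into its whole-molecule fragments. `read` comes from pymove.io.
def _demo_get_molecules():
    struct = read('example_crystal.json')
    return get_molecules(struct, mult=1.05)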
def find_molecules(struct, mult=1.05):
"""
Identify molecular fragments in struct
Returns
-------
List of lists for atom index of each molecule
"""
atoms = struct.get_ase_atoms()
cutOff = natural_cutoffs(atoms, mult=mult)
## Skin=0.3 is not a great parameter, but it seems to work pretty well
## for mulicule identification. In addition, it's not the same affect as
## change the mult value because it's a constant addition to all
## covalent bonds.
neighborList = NeighborList(cutOff, skin=0.0)
neighborList.update(atoms)
matrix = neighborList.get_connectivity_matrix()
n_components, component_list = sparse.csgraph.connected_components(matrix)
molecule_list = [np.where(component_list == x)[0]
for x in range(n_components)]
return molecule_list
def extract_molecules(struct, molecule_list, whole_molecules=True,
mult=1.05):
""" Converts list of list of coordinates to Structures """
# Information from original structure
geo = struct.get_geo_array()
elements = struct.geometry['element']
# Extract molecule geometries from original
molecule_geo_list = [geo[x,:] for x in molecule_list]
molecule_ele_list = [elements[x] for x in molecule_list]
# Convert geometry to Structure
molecule_struct_list = [Structure() for x in range(len(molecule_list))]
[x.from_geo_array(molecule_geo_list[i],molecule_ele_list[i])
for i,x in enumerate(molecule_struct_list)]
# Construct whole molecule representations
if whole_molecules:
molecule_struct_list = [construct_smallest_molecule(struct,x,mult=mult)
for x in molecule_struct_list]
return molecule_struct_list
def construct_smallest_molecule(struct,molecule_struct,mult=1.05):
""" Make molecule smallest possible w.r.t. structure pbc
Purpose
-------
Sometimes structures are given where molecules are not fully connected
because all atomic coordinates have been brought into the cell. This
function minimizes the distance between coordinates in the molecule
w.r.t. the pbc of the input structure such that the molecule's final
geometric coordinates are fully connected.
"""
# Check if molecule is fully connected
mol_index_list = find_molecules(molecule_struct,mult=mult)
if len(mol_index_list) == 1:
return molecule_struct
# New method: construct supercell and pick molecule fragment with the same
# length as the input. Doesn't matter which one is chosen because they are
# all images of the same molecule. The final positions will be augmented
# to have COM inside the cell.
temp = copy.deepcopy(molecule_struct)
temp.set_lattice_vectors(struct.get_lattice_vectors())
# Loop through creating supercells of increasing size to find smallest
# molecule representation efficiently.
success = False
for i in range(2,9):
# Construct ixixi supercell about the origin
supercell = construct_supercell_by_molecule(temp, supercell=i,
include_negative=False)
# Get atom index for molecules from supercell
result = find_molecules(supercell, mult=mult)
# Get molecule structure
molecule_list = extract_molecules(supercell, result,
mult=mult,
whole_molecules=False)
# Identify whole molecule in non-periodic cell
frag_list = [len(find_molecules(x, mult=mult)) for x in molecule_list]
try:
whole_molecule_idx = frag_list.index(1)
success = True
break
except:
pass
if success == False:
raise Exception('No whole represenation was found for the molecule '+
'without periodic boundary conditions. Please check the ' +
'structure for any irregularities. If none are found, then '+
'improvements probably need to be made to the source code to '+
'work for this structure.')
whole_molecule = molecule_list[whole_molecule_idx]
geo = whole_molecule.get_geo_array()
# Ensure COM of final molecule is inside cell and smallest possibe
# w.r.t. lattice sites.
COM = com(whole_molecule)
# Lattice vector array as columns
lattice_vectors = np.array(struct.get_lattice_vectors()).T
lattice_vectors_i = np.linalg.inv(lattice_vectors)
relative_COM = np.dot(lattice_vectors_i, COM)
# First make COM all positive w.r.t. lattice vectors
trans_idx = relative_COM < -0.0001
trans_vector = np.dot(lattice_vectors, trans_idx[:,None])
geo = geo + trans_vector.T
# Recompute COM then move inside of cell
temp_molecule = Structure()
temp_molecule.from_geo_array(geo, whole_molecule.geometry['element'])
COM = com(temp_molecule)
relative_COM = np.dot(lattice_vectors_i, COM)
trans_idx = relative_COM > 0.99
trans_vector = np.dot(lattice_vectors, trans_idx[:,None])
geo = geo - trans_vector.T
# Set final molecule
final_molecule = Structure()
final_molecule.from_geo_array(geo, whole_molecule.geometry['element'])
final_molecule.struct_id = molecule_struct.struct_id
return final_molecule
def reconstruct_with_whole_molecules(struct):
""" Build smallest molecule representation of struct.
"""
rstruct = Structure()
rstruct.set_lattice_vectors(struct.get_lattice_vectors())
molecule_struct_list = get_molecules(struct)
for molecule_struct in molecule_struct_list:
geo_array = molecule_struct.get_geo_array()
ele = molecule_struct.geometry['element']
for i,coord in enumerate(geo_array):
rstruct.append(coord[0],coord[1],coord[2],ele[i])
return rstruct
def com(struct):
"""
Calculates center of mass of the system.
"""
geo_array = struct.get_geo_array()
element_list = struct.geometry['element']
mass = np.array([atomic_masses_iupac2016[atomic_numbers[x]]
for x in element_list]).reshape(-1)
total = np.sum(mass)
com = np.sum(geo_array*mass[:,None], axis=0)
com = com / total
return com
def find_translation_vector(f1, f2, lattice_vectors):
"""
From a set of a lattice vectors, find the lattice vector that minimizes the
distance between fragment 1, f1, and fragment 2, f2.
"""
base_atom = len(f1)
full_geo = np.concatenate([f1, f2], axis=0)
x_dist = np.min(calc_euclidean_dist_vectorized(full_geo[:,0][:,None]
)[0:base_atom,base_atom:])
y_dist = np.min(calc_euclidean_dist_vectorized(full_geo[:,1][:,None]
)[0:base_atom,base_atom:])
z_dist = np.min(calc_euclidean_dist_vectorized(full_geo[:,2][:,None]
)[0:base_atom,base_atom:])
min_dist = np.array([[x_dist,y_dist,z_dist]])
closest_vector = np.argmin(cdist(min_dist, lattice_vectors))
# Decide to add or subtract lattice vector
sign = 1
f1_mean = np.mean(f1,axis=0)
f2_mean = np.mean(f2,axis=0)
mean_dist = f2_mean - f1_mean
plus = mean_dist + lattice_vectors[closest_vector,:]
minus = mean_dist - lattice_vectors[closest_vector,:]
if np.sum(np.abs(plus)) > np.sum(np.abs(minus)):
sign = -1
return closest_vector,sign
def get_molecule_orientation(molecule_struct):
"""
    The precise role of this function is still to be settled, but the
    indexing for the pymatgen principal axes is shown correctly.
    Arguments
    ---------
    molecule_struct: Structure
        Molecule whose orientation is computed; converted internally to a
        pymatgen molecule object.
"""
molp = molecule_struct.get_pymatgen_structure()
PGA = pga(molp)
pa = PGA.principal_axes
# axes = np.zeros(3,3)
# for i,row in enumerate(pa):
# axes[i,:] = row
return pa
def get_orr_tensor(struct):
""" Gets orientation of all molecules in the struct """
molecule_list = get_molecules(struct)
orr_tensor = np.zeros((len(molecule_list),3,3))
for i,molecule_struct in enumerate(molecule_list):
orr_tensor[i,:,:] = get_molecule_orientation(molecule_struct)
return orr_tensor
def get_COM(struct):
""" Gets COM positions for all molecules in the structure
Returns
-------
List of all COM positions in the structure
"""
molecule_list = get_molecules(struct)
COM_array = np.zeros((len(molecule_list),3))
for i,molecule_struct in enumerate(molecule_list):
COM_array[i,:] = calc_COM(molecule_struct)
return COM_array
def calc_COM(molecule_struct):
""" COM calculation for Molecule Structure """
geometry = molecule_struct.get_geo_array()
elements = molecule_struct.geometry['element']
element_numbers = [atomic_numbers[x] for x in elements]
element_masses = np.array([atomic_masses_iupac2016[x]
for x in element_numbers])[:,None]
weighted_geometry = geometry*element_masses
return np.sum(weighted_geometry,axis=0) / np.sum(element_masses)
def construct_supercell_by_molecule(struct,supercell=3,include_negative=False):
""" Construct supercell w.r.t. the molecules in the current structure
Arguments
---------
struct: Structure
Structure object that was used to construct the molecules argument,
Must have lattice parameters.
supercell: int
Dimension of supercell (int x int x int)
"""
if supercell <= 0:
raise Exception('Input to construct_supercell must be larger than 0')
lattice_vectors = struct.get_lattice_vectors()
    if lattice_vectors is False:
raise Exception('Input Structure object to function '+
'construct_supercell must have lattice parameters.')
lattice_vectors = np.array(lattice_vectors)
# Array for translations to construct supercell
translation_vectors = get_translation_vectors(supercell, lattice_vectors,
include_negative=include_negative)
# Initialize supercell
supercell_struct = Structure()
supercell_struct.set_lattice_vectors(lattice_vectors*supercell)
geo_array = struct.get_geo_array()
# Broadcast geometry with translation vectors
supercell_geo = geo_array[:,None,:] + translation_vectors
num_atoms,num_tr,dim = supercell_geo.shape
# Getting correct indexing for supercell tensor
# Indexing scheme for molecules in first unit cell
depth_index = num_tr*dim*np.arange(num_atoms)
# Broadcast across three dimensions
column_values = np.arange(3)
unit_cell_index = depth_index[:,None] + column_values
# Index scheme for the next unit cells in supercell
molecule_index = np.arange(num_tr)*3
    # Broadcast initial molecule across the next molecules
supercell_index = molecule_index[:,None,None] + unit_cell_index
supercell_index = supercell_index.reshape(num_tr*num_atoms, 3)
supercell_geo = np.take(supercell_geo, supercell_index)
###########################################################################
# For example, this gets the original geometry #
###########################################################################
# depth_index = num_tr*dim*np.arange(num_atoms)
# column_values = np.arange(3)
# broadcasted_index = depth_index[:,None] + column_values
###########################################################################
num_ele = translation_vectors.shape[0]
supercell_elements = np.tile(struct.geometry['element'],num_ele)
supercell_struct.from_geo_array(supercell_geo, supercell_elements)
return supercell_struct
def construct_molecular_index_for_supercell(num_atoms, num_tr,
combine_mol=True):
'''
Arguments
---------
num_atoms: int
Number of atoms in the original structure
num_tr: int
Number of translation vectors applied to construct supercell
combine_mol: bool
        True: molecules should be combined, as is the case when the desired
              output is a single supercell structure
False: molecules should not be combined, such is the case when trying
to identify the smallest representation of the molecule w/o
pbc
'''
# Cartesian space
dim = 3
# Getting correct indexing for supercell tensor
# Indexing scheme for molecules in first unit cell
depth_index = num_tr*dim*np.arange(num_atoms)
# Broadcast across three dimensions
column_values = np.arange(3)
unit_cell_index = depth_index[:,None] + column_values
# Index scheme for the next unit cells in supercell
molecule_index = np.arange(num_tr)*3
    # Broadcast initial molecule across the next molecules
supercell_index = molecule_index[:,None,None] + unit_cell_index
    if combine_mol:
return supercell_index.reshape(num_tr*num_atoms, 3)
return supercell_index
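# Illustrative check (not part of the original module): for num_atoms=2 and
# num_tr=2 the combined index has shape (num_tr*num_atoms, 3) = (4, 3), while
# combine_mol=False keeps the per-translation grouping of shape (2, 2, 3).
# >>> construct_molecular_index_for_supercell(2, 2).shape
# (4, 3)
# >>> construct_molecular_index_for_supercell(2, 2, combine_mol=False).shape
# (2, 2, 3)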
def construct_orientation_supercell(struct,supercell,include_negative=False,
molecule_list=[]):
""" Construct supercell of only molecular orientations
Arguments
---------
struct: Structure
Structure object that was used to construct the molecules argument,
Must have lattice parameters.
supercell: int
Dimension of supercell (int x int x int)
    molecule_list: list of Structures
Can pass in argument if molecule_list was pre-computed
"""
if supercell <= 0:
raise Exception('Input to construct_supercell must be larger than 0')
lattice_vectors = struct.get_lattice_vectors()
    if lattice_vectors is False:
raise Exception('Input Structure object to function '+
'construct_supercell must have lattice parameters.')
lattice_vectors = np.array(lattice_vectors)
translation_vectors = get_translation_vectors(supercell, lattice_vectors,
include_negative)
if len(molecule_list) == 0:
molecule_list = get_molecules(struct)
num_atoms = struct.get_geo_array().shape[0]
num_molecules = len(molecule_list)
num_tr = len(translation_vectors)
COM_array = np.array([calc_COM(molecule_struct)
for molecule_struct in molecule_list])
orientation_tensor = np.array([get_molecule_orientation(mol)
for mol in molecule_list])
orientation_tensor = orientation_tensor + COM_array[:,None,:]
orientation_tensor = orientation_tensor[:,None,:] + \
translation_vectors[:,None,:]
orientation_tensor = orientation_tensor.reshape(num_molecules*num_tr,3,3)
COM_tensor = COM_array[:,None,:] + translation_vectors
COM_tensor = COM_tensor.reshape(num_molecules*num_tr,3)
return orientation_tensor,COM_tensor
def get_translation_vectors(supercell, lattice_vectors, include_negative=False):
''' Returns all translation vectors for a given supercell size
Arguments
---------
supercell: int
Value of the supercell dimension. Example 3x3x3
lattice_vectors: Numpy array
Lattice vectors in row format where each lattice vector is one row.
include_negative: bool
False: Only supercells in the positive direction will be constructed
True: Supercells in the positive and negative direction will be
constructed.
If true, constructs the supercell about the origin of the original
Returns: Numpy array of all translation vectors in row format.
'''
if include_negative:
list_range = [x for x in range(-supercell+1,supercell,1)]
else:
list_range = [x for x in range(supercell)]
tr = list(itertools.product(list_range,list_range,list_range))
translation_vectors = np.dot(tr,lattice_vectors)
return translation_vectors
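# Illustrative example (not part of the original module): a 2x2x2 positive
# supercell of a cubic 5 Angstrom cell yields the 8 corner translations, e.g.
# >>> get_translation_vectors(2, np.eye(3) * 5.0).shape
# (8, 3)
# With include_negative=True the per-axis range becomes -1..1, i.e. 27 vectors.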
def compute_motif(struct, supercell=3, include_negative=True, num_mol=12):
""" Computes deg_array which is translated into specific packing motifs
Arguments
---------
supercell: int
Value of the supercell dimension. Example 3x3x3
include_negative: bool
False: Only supercells in the positive direction will be constructed
True: Supercells in the positive and negative direction will be
constructed. This will double the number constructed.
num_mol: int >= 4
Number of nearest neighbor molecules to be used for motif
identification. Should be at least four.
"""
deg_array,plane_deg_min = compute_deg_array(struct, supercell,
include_negative, num_mol=num_mol)
return motif_definitions(deg_array,plane_deg_min)
def compute_deg_array(struct, supercell=3, include_negative=True, num_mol=12):
molecule_list = get_molecules(struct)
orientation_tensor,COM_array = construct_orientation_supercell(struct,
supercell,include_negative,
molecule_list)
deg_array,plane_deg_min = compute_orientation_difference(orientation_tensor,COM_array,
molecule_list,num_mol=num_mol)
return deg_array,plane_deg_min
def motif_definitions(deg_array,plane_deg_min):
""" Defines how motifs are identified from the deg_array
Arguments
---------
deg_array: np.array (n,)
Vector of orientation differences found
plane_deg_min: np.array
Vector of orientation differences found for molecules that were
co-planar to the reference molecule
"""
num_mol = 4
if len(deg_array) < num_mol:
raise Exception("For proper motif identification, the input array "+
"must have a length of at least 6. "+
"Input was {}.".format(deg_array))
# Only use first for neighbors
def_deg = deg_array[0:num_mol]
sheet_like = def_deg < 9
# Be more stringent for sheet classification
if np.sum(deg_array < 9) == len(deg_array):
return 'Sheet'
else:
        if sheet_like[0]:
            if not sheet_like[1]:
if np.sum(plane_deg_min < 9) == len(plane_deg_min):
return 'Sheet'
return 'Sandwich'
else:
# if np.sum(plane_deg_min < 9) == len(plane_deg_min):
# return 'Gamma'
return 'Gamma'
else:
# Have at least 1 co-planar in first 4 neighbors
if np.sum(sheet_like) == 1:
if np.sum(plane_deg_min < 9) == len(plane_deg_min):
return 'Sheet'
return 'Herringbone'
def compute_orientation_difference(orientation_tensor,COM_array,molecule_list,
num_mol=12):
"""
    Computes the orientation difference between the molecular plane of the
    molecule in molecule_list closest to the origin and the principal axes
    of the num_mol supercell molecules closest to it.
Arguments
---------
num_mol: int
Should be approximately equal to the number of molecules per unit cell
multiplied by supercell
"""
    centered_orientation_tensor = orientation_tensor - COM_array[:,None]
index_min,index_dist_min = find_nearest_COM(COM_array,
reference=molecule_list, num_mol=num_mol)
molecule_struct_min = molecule_list[index_min]
    origin_orientation = centered_orientation_tensor[index_dist_min]
# Compute norm to molecular plane of original molecule
plane_norm = get_molecule_orientation(molecule_struct_min)[0,:]
original_COM_array = np.array([calc_COM(x) for x in molecule_list])
COM_test = original_COM_array[index_min,:]
origin_COM = COM_array[index_dist_min]
dist_vector = COM_test - origin_COM
dist_vector = dist_vector / np.linalg.norm(dist_vector,axis=1)[:,None]
COM_test = COM_test/np.linalg.norm(COM_test)
COM_angles = np.dot(dist_vector, COM_test)
np.minimum(COM_angles, 1.0, out=COM_angles)
molecule_planes = np.rad2deg(np.arccos(COM_angles))
np.around(molecule_planes,decimals=1, out=molecule_planes)
# Identify if there are any molecular planes
index_plane = np.where((np.abs(molecule_planes-90) < 11) |
(np.abs(molecule_planes-180) < 11) |
(molecule_planes < 11))
orr_diff_array = np.zeros((num_mol,3))
for i,orr2 in enumerate(origin_orientation):
orr_diff_array[i,:] = np.dot(plane_norm,orr2.T)
# Small numerical errors
np.minimum(orr_diff_array, 1.0, out=orr_diff_array)
deg = np.rad2deg(np.arccos(orr_diff_array))
deg_min = np.min(deg,axis=1)
np.around(deg_min,decimals=1, out=deg_min)
return deg_min,deg_min[index_plane]
def find_nearest_COM(COM_array, reference=[], num_mol=12):
""" Find index of nearest num_mol to origin with optional reference list
Arguments
---------
COM_array: np.array nx3
2D matrix of COM positions of all molecules to be indexed
reference: list of Structures
If provided, a list of Structures to be used as the reference molecule.
The molecule closest to the origin of this list is identified and
the num_mol nearest in the COM_array will be indexed.
num_mol: int
Number of nearest neighbors to identify
Returns
-------
index_min: int
Index of the COM found nearest to the origin w.r.t the COM_array or
the reference list if the reference list is provided
index_dist_min: np.array
        Vector of indices of the nearest neighbors to the minimum COM
"""
# Get COM of molecule struct closest to origin
if len(reference) > 1:
original_COM_array = np.array([calc_COM(x) for x in reference])
        COM_dist = np.linalg.norm(original_COM_array, axis=1)
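        # NOTE: the remainder of this function was missing from the source;
        # the lines from here on are a minimal sketch of the documented
        # behavior: pick the reference molecule nearest the origin, then
        # return the indices of the num_mol COMs in COM_array nearest to it
        # (the reference itself, at distance ~0, is skipped).
        index_min = int(np.argmin(COM_dist))
        target_COM = original_COM_array[index_min, :]
    else:
        index_min = int(np.argmin(np.linalg.norm(COM_array, axis=1)))
        target_COM = COM_array[index_min, :]
    dist = np.linalg.norm(COM_array - target_COM, axis=1)
    index_dist_min = np.argsort(dist)[1:num_mol + 1]
    return index_min, index_dist_min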
# -*- coding: utf-8 -*-
"""
Created on Thu May 30 20:03:50 2019
Finds Vg1 and Vg2 values above a threshold, determined by the ratio of the area
of a Gaussian fit to the intensity histogram to the total area of the intensities
@author: <NAME>
"""
import numpy as np
import scipy.signal as ss
import scipy.optimize as opt
from scipy.signal import medfilt2d, savgol_filter
from scipy.ndimage import correlate
from sklearn.neighbors import KDTree
import stability as stab
def hist_data(z):
"""
Finds x and y data from histogram
:param z: input
:return: x and y
"""
data = np.histogram(z, bins='scott')
x = data[1]
x = np.array([(x[i] + x[i + 1]) / 2 for i in range(0, len(x) - 1)])
return x, np.array(data[0])
def gauss(x, *params):
return abs(params[2]) * np.exp(-(x - params[0]) ** 2 / (2 * params[1] ** 2))
def multi_gaussian(x, *params):
"""
    Sum of multiple Gaussian components; the number of components is inferred from the number of input parameters (three per component)
"""
y = np.zeros_like(x)
index = np.arange(0, len(params), 3)
if index.size > 1:
for i in range(0, len(params) // 3):
mu = params[i]
sig = params[i + len(params) // 3]
amp = params[i + 2 * len(params) // 3]
y = y + abs(amp) * np.exp(-(x - mu) ** 2 / (2 * sig ** 2))
else:
y = y + abs(params[2]) * np.exp(-(x - params[0]) ** 2 / (2 * params[1] ** 2))
return y
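# Illustrative example (not part of the original module): two components are
# packed grouped by kind, (mu1, mu2, sig1, sig2, amp1, amp2), rather than
# interleaved per component.
# >>> x = np.linspace(-5, 5, 101)
# >>> y = multi_gaussian(x, 0.0, 2.0, 1.0, 0.5, 3.0, 1.0)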
def multi_gauss_background(x, *params):
y = np.zeros_like(x)
index = np.arange(0, len(params) - 2, 3)
if index.size > 1:
y = y + params[0] * x + params[1]
for i in range(0, (len(params) - 2) // 3):
mu = params[i + 2]
sig = params[i + 2 + (len(params) - 2) // 3]
amp = params[i + 2 + 2 * (len(params) - 2) // 3]
y = y + abs(amp) * np.exp(-(x - mu) ** 2 / (2 * sig ** 2))
else:
y = y + params[0] * x + params[1] + abs(params[4]) * np.exp(-(x - params[2]) ** 2 / (2 * params[3] ** 2))
return y
def greedy_guess(guess, x, y):
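    """
    Greedily seed one more Gaussian component on top of an existing
    multi_gauss_background fit: the new mean and amplitude are placed at the
    largest smoothed relative residual, and the new sigma reuses half of the
    previous component's width.
    """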
n = (len(guess) - 2) // 3
m, sig, a = guess[2:n + 2], guess[n + 2:2 * n + 2], guess[2 * n + 2:]
chi = (y - multi_gauss_background(x, *guess)) / multi_gauss_background(x, *guess)
chi = savgol_filter(chi, 3, 2)
m, a = np.append(m, float(x[np.where(chi == np.max(chi))])), np.append(a, float(y[np.where(chi == np.max(chi))]))
sig = np.append(sig, sig[n - 1] / 2)
return np.append(guess[:2], np.append(m, np.append(sig, a)))
def gradient(x, y, z):
"""
Calculates gradient along x and y of intensities to reduce noise
    @param x: x values
@param y: y values
@param z: intensities
@return:
"""
m_z = np.reshape(z, (len(np.unique(y)), len(np.unique(x))))# Transform array into matrix
    sg = savgol_filter(m_z, 5, 2) + savgol_filter(m_z, 5, 2, axis=0)  # Savgol filter acts as a low-pass filter
signal = sg - np.mean(sg) + np.mean(m_z)
return np.reshape(signal, np.shape(x))
def gradient_exp(x, y, z):
"""
Calculates gradient along x and y of intensities to reduce noise
    @param x: x values
@param y: y values
@param z: intensities
@return:
"""
m_z = np.reshape(z, (len(np.unique(y)), len(np.unique(x))))# Transform array into matrix
diff = [[0, -1, 0], [-1, 5, -1], [0, -1, 0]]
z_diff = correlate(m_z, diff)
    sg = savgol_filter(z_diff, 5, 2) + savgol_filter(z_diff, 5, 2, axis=0)  # Savgol filter acts as a low-pass filter
signal = sg - np.mean(sg) + np.mean(m_z)
return np.reshape(signal, np.shape(x))
def filtering(x, y, z):
m_z = np.reshape(z, (len(np.unique(y)), len(np.unique(x)))) # Transform array into matrix
s = medfilt2d(m_z)
return np.reshape(s, (int(len(x)),))
def normalise(z):
"""
Unity-based normalisation function, such that all values range between 0 and 1
:param z: Raw data that needs normalising
:return: Normalised data
"""
return np.nan_to_num((z - np.min(z)) / (np.max(z) - np.min(z)))
def fit_gauss(z):
intensity = normalise(z)
x, y = hist_data(intensity)
guess = np.append(0, np.append(np.median(y), np.append(np.median(x[np.where(y == np.max(y))]),
np.append(np.std(x[np.where(y > np.median(y))]),
np.max(y)))))
fit_param, cov = opt.curve_fit(multi_gauss_background, x, y, guess)
if fit_param[2] > 0.5:
index = np.where(intensity<fit_param[2]-3*abs(fit_param[3]))
else:
index = np.where(intensity>fit_param[2]+3*abs(fit_param[3]))
return index
def curved_plane(x, y, param):
return param[0]*x + param[1]*x**2 + param[2]*y + param[3]*y**2 + param[4]*x*y + param[5]
def linear_plane(x, y, param):
return param[0]*x + param[1]*y + param[2]
def minimise_plane(param, x, y, z):
return np.sum((z - linear_plane(x, y, param))**2)
def linear(x, z):
return (np.median(z[np.where(x==np.min(x))])-np.median(z[np.where(x==np.max(x))]))/(np.min(x)-np.max(x))
def remove_background(x, y, z):
p = gradient_exp(x, y, z)
param = np.array((linear(x, z), linear(y,z), np.median(p)))
sol = opt.minimize(minimise_plane, param, args=(x, y, p))
p_n = normalise(p - linear_plane(x, y, sol.x))
return p_n*(np.max(z)-np.min(z)) + np.min(z)
def grad_exp(z, val_x, val_y):
val = z.reshape(val_y, val_x)
scharr = np.array([[ -3-3j, 0-10j, +3 -3j],
[-10+0j, 0+ 0j, +10 +0j],
[ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy
grad = ss.convolve2d(val, scharr, boundary='symm', mode='same')
index = np.where(np.logical_or(abs(np.angle(grad).flatten())<=0.15, abs(np.angle(grad).flatten())>=np.pi-0.15))
z[index] = 0
return z
def get_klpq_div(p_probs, q_probs):
    # Calculates the Kullback-Leibler divergence between pi and qi
kl_div = 0.0
for pi, qi in zip(p_probs, q_probs):
kl_div += pi*np.nan_to_num(np.log(pi/qi))
return kl_div
def D_KL(threshold, x, y):
# Finds best fit Gaussian distribution and calculates the corresponding Kullback-Leibler divergence
index = np.where(np.logical_and(x>=threshold[0], x<=threshold[1]))
xs, ys = x[index], y[index]
if np.trapz(ys)>0:
ys = ys/np.trapz(ys)
else:
return np.inf
guess = np.append(np.median(xs[np.where(ys == np.max(ys))]),
np.append(np.std(xs[np.where(ys > np.median(ys))]),
np.max(ys)))
bounds = ((np.min(x)-np.std(x), np.std(x)/10**4, np.mean(ys)), (np.max(x)+np.std(x), np.max(x)-np.min(x), 10*np.max(ys)))
fit_param, cov = opt.curve_fit(gauss, xs, ys, guess, bounds=bounds)
    return get_klpq_div(ys+10**-7, gauss(xs, *fit_param)+10**-7)  # Add small epsilon to ensure we don't divide by zero
def minimise_DKL(x, y):
# Estimate first guess and boundaries to use:
guess = np.append(np.median(x[np.where(y == np.max(y))]),
np.append(np.std(x[np.where(y > np.median(y))]),
np.max(y)))
b = ((np.min(x)-np.std(x), np.std(x)/10**4, np.mean(y)), (np.max(x)+np.std(x), np.max(x)-np.min(x), np.max(y)*10))
fit_param, cov = opt.curve_fit(gauss, x, y, guess, bounds=b)
x0 = [fit_param[0]-2*fit_param[1], fit_param[0]+2*fit_param[1]]
bound = ((np.min(x), fit_param[0]-fit_param[1]), (fit_param[0]+fit_param[1], np.max(x)))
# Find optimal bound solutions
sol = opt.minimize(D_KL, x0, jac=None, method='L-BFGS-B', options={'eps':1/len(x)}, args=(x, y), bounds=bound)
return sol.x
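# Usage sketch: minimise_DKL returns the [low, high] interval around the
# dominant histogram peak that makes the enclosed data most Gaussian in the
# KL sense; threshold_DKL below then keeps only the intensities that fall
# outside that interval (above or below, depending on the signal sign).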
def threshold_DKL(z):
intensity = normalise(z)
x, y = hist_data(intensity)
    y = y**0.5  # Broadens peaks so that finer structure in the intensity can be identified
threshold = minimise_DKL(x, y)
if abs(np.max(z))>abs(np.min(z)):
index = np.where(intensity>=threshold[1])
else:
index = np.where(intensity<=threshold[0])
return index
def threshold(z, val):
if abs(np.max(z))>abs(np.min(z)):
v = abs(np.min(z))*0.9
else:
        v = -abs(np.max(z))*0.9
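    # NOTE: the remainder of this function was missing from the source; the
    # lines below are a minimal sketch mirroring threshold_DKL. Treating
    # `val` as an extra scaling of the cut level is an assumption.
    if abs(np.max(z)) > abs(np.min(z)):
        index = np.where(z >= v * val)
    else:
        index = np.where(z <= v * val)
    return index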
#In[1]:
import matplotlib.pyplot as plt
from matplotlib import cm
import Putil.sampling.MCMC.metropolis_hasting as mh
import numpy as np
def rf():
#return [np.random.choice(ss, size=1), np.random.choice(ss, size=1)]
return [np.random.random() * 2 - 1, np.random.random() * 2 - 1]
pass
def pdff(x):
return 1 / (2 * np.pi * np.sqrt(1 - 0.85)) * np.exp(1 / (2 * (1 - 0.85)) * (- x[0]**2 - x[1]**2) * x[0] ** 2 * x[1] ** 2)
pass
ss = np.linspace(-1, 1, 100)
a = mh.MetropolisHasting()
a.set_random_func(rf)
a.set_pdf_func(pdff)
xs = list()
for i in range(0, 20000):
xs.append(a.sample())
pass
xs = np.array(xs)
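# The original snippet ended here; what follows is a minimal sketch of an
# assumed continuation, visualizing the sampled density with the matplotlib
# imports above.
plt.hist2d(xs[:, 0], xs[:, 1], bins=60, cmap=cm.viridis)
plt.colorbar()
plt.show()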
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import h5py as h5
from scipy.optimize import curve_fit
import scipy.constants as sc
import os
from scipy.integrate import simps
def gauss(x, a, x0, sigma, offset):
return a*np.exp(-(x-x0)**2/(2*sigma**2))+offset
def getData(fileName, sensorName):
"""
Read data from .h5 file, remove nans
Parameters
----------
fileName : str
name of file to be read
sensorName : str
        name of sensor whose data is loaded
Returns
-------
x : array
Array of x values visited
y : array
Array of y values visited
s : array
Array containing Beam Loss measured at (x,y)
"""
f = h5.File(fileName, 'r')
x = np.array(f['PositionHEX']["x"])
y = np.array(f['PositionHEX']["y"])
s = np.array(f[sensorName])
# x, y, s = pwt.removeNan([x,y,s])
return x,y,s
def removeNan(dataList):
isNan = np.logical_or.reduce([np.isnan(d) for d in dataList])
idx = np.argwhere(np.logical_not(isNan)).flatten()
return [d[idx] for d in dataList]
def getDataManual(fileName, sensorName):
f = h5.File(fileName, 'r')
x = np.array(f['PositionHEX']["x"])
y = np.array(f['PositionHEX']["y"])
s = np.array(f['SATSY03-DLAC080-DHXP:BLM1_comp'])
x, y, s = removeNan([x,y,s])
nShots=40
setupFilename = 'D:/Measurement Data/ATHOS/20201002/ws/ws_20201003_051658/scanPreparation.h5'
fSetup = h5.File(setupFilename, 'r')
    edges = [fSetup['Points/P'+str(i)][()] for i in range(18)]  # .value is deprecated in h5py >= 3
xFit = []
yFit = []
sFit = []
for i in range(9):
xStart = edges[2*i][0]
xEnd = edges[2*i+1][0]
yStart = edges[2*i][1]
yEnd = edges[2*i+1][1]
xFit.extend(np.linspace(xStart, xEnd, nShots))
yFit.extend(np.linspace(yStart, yEnd, nShots))
xFit = np.array(xFit)
yFit = np.array(yFit)
# fFit = interp2d(x, y, s, fill_value=0, kind = 'cubic')
# sFit = np.array([fFit(xFit[i], yFit[i]) for i in range(len(xFit))])
for i in range(len(xFit)):
distance = np.sqrt((x-xFit[i])**2+(y-yFit[i])**2)
sFit.append(s[np.argmin(distance)])
sFit = np.array(sFit)
return xFit,yFit,sFit
def getDataRaw(fileName, sensorName):
"""
Read data from .h5 file, remove nans
Parameters
----------
fileName : str
name of file to be read
sensorName : str
        name of sensor whose data is loaded
Returns
-------
x : array
Array of x values visited
y : array
Array of y values visited
s : array
Array containing Beam Loss measured at (x,y)
"""
f = h5.File(fileName, 'r')
x = np.array(f['PositionHEX']["x"])
y = np.array(f['PositionHEX']["y"])
s = np.array(f[sensorName])
start = 503
stop = 523
offset = np.average(s[:, 0:40], axis=1)
sB2 = [s[i, start:stop]-offset[i] for i in range(len(offset))]
sB2 = np.array(sB2)
# signal = -np.sum(sB2, axis=1)
signal = -simps(sB2, axis=1)
# plt.plot(signal)
return x,y,signal*1e-3
def loadAndPrepareInput(fileName, sensor='SensorValue/SATCL01-DBLM135:B2_LOSS', nWiresInFile=9, wiresUsed=[0,1,2,3,4,5,6,7,8], map180=False,nPerPoint = 1, normalize = True, manualSave = False):
"""
load data from file, transform it into displacement and angle formulation
Parameters
----------
fileName : str
name of file to be read
sensor : str
name of sensor whos data is loaded
nWiresInFile: int
number of wires contained in the loaded measurement
wiresUsed : list/array
list or array selecting which wires are included in the dataset returned
map180 : boolean
        switch to turn on and off projection of the 0-360 interval onto a 0-180 interval.
        When True, mapping to 180 deg takes place
Returns
-------
s : array
Array containing Beam Loss measured at (d,angle)
d : array
Array of displacements d visited
angle : array
Array of angles visited
"""
# x, y, s = getData(fileName, sensor)
if manualSave:
x, y, s = getDataManual(fileName, sensor)
else:
x, y, s = getData(fileName, sensor)
# x, y, s = getDataRaw(fileName, 'SensorValue/SATCL01-DBLM135:LOSS_SIGNAL_RAW')
x = x.reshape((nWiresInFile, -1))
y = y.reshape((nWiresInFile, -1))
s = s.reshape((nWiresInFile, -1))
x, y, s = x[wiresUsed], y[wiresUsed], s[wiresUsed]
# s=s/max(s)
#get displacements and angles
[d, angle] = transformRep(x,y, map180)
for i in range(d.shape[0]):
initGuess = (np.max(s[i])-np.min(s[i]), d[i][np.argmax(s[i])], 0.1*(np.max(d[i])-np.min(d[i])), np.min(s[i]))
fit = curve_fit(gauss, d[i], s[i], p0 = initGuess)[0]
d[i] -= fit[1]
if normalize:
s[i] -= fit[3]
# com = np.sum(d[i]*s[i])/np.sum(s[i])
# d[i]-=com
# d[i] -= d[i][np.argmax(s[i])]
# print(d[i][np.argmax(s[i])])
if nPerPoint != 1:
s = s.reshape(-1,nPerPoint).mean(axis = 1).reshape(len(wiresUsed),-1)
d = d.reshape(-1,nPerPoint).mean(axis = 1).reshape(len(wiresUsed),-1)
angle = angle.reshape(-1,nPerPoint).mean(axis = 1).reshape(len(wiresUsed),-1)
if normalize:
return s/np.max(s),d,angle
else:
return s,d,angle
def transformRep(x,y, spacingWraped180):
    d = np.zeros_like(x)
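    # NOTE: the remainder of transformRep was missing from the source; the
    # lines below are a minimal sketch, assuming each row of x/y traces one
    # straight wire scan: the wire angle is taken from the scan direction,
    # and d is the signed displacement along that direction.
    # spacingWraped180 folds angles into [0, 180) when True.
    angle = np.zeros_like(x)
    for i in range(x.shape[0]):
        dx = x[i, -1] - x[i, 0]
        dy = y[i, -1] - y[i, 0]
        a = np.rad2deg(np.arctan2(dy, dx))
        if spacingWraped180:
            a = a % 180.0
        angle[i] = a
        u = np.array([dx, dy]) / np.hypot(dx, dy)
        d[i] = (x[i] - x[i, 0]) * u[0] + (y[i] - y[i, 0]) * u[1]
    return [d, angle]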
"""
First created on Mon Aug 13 10:01:03 2018
Main code for the creation of the image for Zernike analysis;
Other moduls avaliable are:
Zernike_Cutting_Module
Zernike_Analysis_Module
Versions:
Oct 31, 2018; 0.1 -> 0.11 fixed FRD effect
Nov 1, 2018; 0.11 -> 0.12 added correct edges to the detector; fixed wrong behavior for misaligment
Nov 2, 2018; 0.12 -> 0.13 added lorentzian wings to the illumination of the pupil
Nov 3, 2018; 0.13 -> 0.13b fixed edges of detector when det_vert is not 1
Nov 12, 2018; 0.13b -> 0.13c changed parameter describing hexagonal effect "f" from 0.1 to 0.2
Nov 12, 2018; 0.13c -> 0.14 changed illumination description modifying entrance -> exit pupil illumination
Nov 29, 2018; 0.14 -> 0.14b added fixed scattering slope, deduced from large image in focus
Dec 16, 2018; 0.14b -> 0.14c allparameters_proposal_err from list to array
Dec 18, 2018; 0.14c -> 0.14d strutFrac upper limit to 0.13 in create_parInit
Dec 23, 2018; 0.14d -> 0.15 refactoring so that x_ilum and y_ilum is one
Dec 26, 2018; 0.15 -> 0.15b when in focus, create exactly 10x oversampling
Dec 31, 2018; 0.15b -> 0.16 major rewrite of downsampling algorithm
Jan 8, 2019; 0.16 -> 0.17 added support for zmax=22
Jan 14, 2019; 0.17 -> 0.18 fixed bug with downsampling algorithm - I was just taking central values
Jan 15, 2019; 0.18 -> 0.19 added simple algorithm to interpolate between 1/10 pixels in the best position
Feb 15, 2019; 0.19 -> 0.20 updated analysis for the new data
Feb 21, 2019; 0.20 -> 0.20b test parameter for showing globalparamers outside their limits
Feb 22, 2019; 0.20 -> 0.21 added support for Zernike higher than 22
Feb 22, 2019; 0.21 -> 0.21b added support for return image along side likelihood
Apr 17, 2019; 0.21b -> 0.21c changed definition of residuals from (model-data) to (data-model)
Jun 4, 2019; 0.21c -> 0.21d slight cleaning of the code, no functional changes
Jun 26, 2019; 0.21d -> 0.21e included variable ``dataset'',
which denotes which data we are using in the analysis
Jul 29, 2019; 0.21e -> 0.21f changed the spread of parameters when drawing initial solutions, based on data
Sep 11, 2019; 0.21f -> 0.21g globalparameters_flat_6<1 to globalparameters_flat_6<=1
Oct 10, 2019: 0.21g -> 0.21h scattered_light_kernel saving option
Oct 31, 2019: 0.21h -> 0.22 (re)introduced small amount of apodization (PIPE2D-463)
Oct 31, 2019: 0.22 -> 0.22b introduced verbosity
Nov 07, 2019: 0.22b -> 0.22c nan values can pass through find_single_realization_min_cut
Nov 08, 2019: 0.22c -> 0.22d changes to resizing and centering
Nov 13, 2019: 0.22d -> 0.23 major changes to centering - chief ray in the center of oversampled image
Nov 15, 2019: 0.23 -> 0.24 change likelihood definition
Dec 16, 2019: 0.24 -> 0.24a added iluminaton with z4,z11,z22=0
Jan 14, 2020: 0.24a -> 0.24b added verbosity in find_single_realization_min_cut function
Jan 31, 2020: 0.24b -> 0.25 added support for data containing spots from two wavelengths
Feb 11, 2020: 0.25 -> 0.26 proper bilinear interpolation of the spots
Feb 17, 2020: 0.26 -> 0.26a increased speed when save parameter=0
Feb 18, 2020: 0.26a -> 0.26b mask image going through subpixel interpolation
Feb 19, 2020: 0.26b -> 0.26c normalization of sci image takes into account mask
Mar 1, 2020: 0.26c -> 0.27 apodization scales with the size of input images
Mar 4, 2020: 0.27 -> 0.28 (re-)introduced custom size of pupil image
Mar 6, 2020: 0.28 -> 0.28b refactored cut_square function (making it much faster)
Mar 8, 2020: 0.28b -> 0.28c set limit in grating factor to 120000 in generating code
Apr 1, 2020: 0.28c -> 0.28d svd_invert function
May 6, 2020: 0.28d -> 0.28e clarified and expanded comments in postprocessing part
Jun 28, 2020: 0.28e -> 0.29 added multi analysis
Jul 02, 2020: 0.29 -> 0.30 added internal fitting for flux
Jul 02, 2020: 0.30 -> 0.30a lnlike_Neven_multi_same_spot can accept both 1d and 2d input
Jul 07, 2020: 0.30a -> 0.30b added threading time information
Jul 09, 2020: 0.30b -> 0.30c expwf_grid changed to complex64 from complex128
Jul 09, 2020: 0.30c -> 0.30d changed all float64 to float32
Jul 16, 2020: 0.30d -> 0.31 moved all fft to scipy.signal.fftconvolve
Jul 20, 2020: 0.31 -> 0.32 introduced renormalization_of_var_sum for multi_var analysis
Jul 26, 2020: 0.32 -> 0.32a only changed last value of allparameters if len()==42
Aug 10, 2020: 0.32a -> 0.33 added extra Zernike to parInit
Aug 12, 2020: 0.33 -> 0.33a changed iters to 6 in fluxfit
Sep 08, 2020: 0.33a -> 0.33b added test_run to help with debugging
Oct 05, 2020: 0.33b -> 0.33c trying to always output flux multiplier when fit_for_flux
Oct 06, 2020: 0.33c -> 0.34 added possibility to specify position of created psf
Oct 13, 2020: 0.34 -> 0.34b added finishing step of centering, done with Nelder-Mead
Oct 22, 2020: 0.34b -> 0.35 added class that does Tokovinin multi analysis
Nov 03, 2020: 0.35 -> 0.35a create parInit up to z=22, with larger parametrization
Nov 05, 2020: 0.35a -> 0.35b return same value if Tokovinin does not work
Nov 16, 2020: 0.35b -> 0.35c modified movement of parameters
Nov 17, 2020: 0.35c -> 0.35d small fixes in check_global_parameters with parameters 0 and 1
Nov 19, 2020: 0.35d -> 0.36 realized that vertical strut is different than others -
first, simplest implementation
Nov 19, 2020: 0.36 -> 0.36a modified parInit movements for multi (mostly reduced)
Dec 05, 2020: 0.36a -> 0.37 misalignment and variable strut size
Dec 13, 2020: 0.37 -> 0.37a changed weights in multi_same_spot
Jan 17, 2021: 0.37a -> 0.37b accept True as input for simulation00
Jan 25, 2021: 0.37b -> 0.37c fixed fillCrop function in PsfPosition, slice limits need to be integers
Jan 26, 2021: 0.37c -> 0.38 PIPE2D-701, fixed width of struts implementation
Jan 28, 2021: 0.38 -> 0.39 added flux mask in chi**2 calculation
Jan 28, 2021: 0.39 -> 0.39b lowered allowed values for pixel_effect and fiber_r
Feb 08, 2021: 0.39b -> 0.4 fixed bilinear interpolation for secondary, x and y confusion
Feb 25, 2021: 0.4 -> 0.40a added directory for work on Tiger
Mar 05, 2021: 0.40a -> 0.41 introduced create_custom_var function
Mar 08, 2021: 0.41 -> 0.41a added support for saving intermediate images to tiger
Mar 24, 2021: 0.41a -> 0.41b added support for masked images in find_centroid_of_flux
Mar 26, 2021: 0.41b -> 0.41c added create_custom_var function as a separate function
Mar 26, 2021: 0.41c -> 0.41d semi-implemented custom variance function in Tokovinin algorithm
Mar 26, 2021: 0.41d -> 0.41e model_multi_out has correct input parameters now
Apr 01, 2021: 0.41e -> 0.42 changed bug/feature in checking wide_43 and wide_42 parameters
Apr 02, 2021: 0.42 -> 0.43 changed width of slit shadow and slit holder shadow
Apr 04, 2021: 0.43 -> 0.44 implemented f_multiplier_factor
Apr 04, 2021: 0.44 -> 0.44a implemented possibility for using np.abs(chi) as likelihood
Apr 08, 2021: 0.44a -> 0.44b propagated change from 0.44a to Tokovinin algorithm
Apr 12, 2021: 0.44b -> 0.44c modified renormalization factors for abs(chi) value
Apr 13, 2021: 0.44c -> 0.44d fixed bug in the estimate of mean_value_of_background
Apr 14, 2021: 0.44d -> 0.44e mean_value_of_background estimated from sci or var data
Apr 22, 2021: 0.44e -> 0.44f introduced multi_background_factor
Apr 27, 2021: 0.44f -> 0.45 Tokovinin now works much quicker with multi_background_factor
(create_simplified_H updated)
Apr 29, 2021: 0.45 -> 0.45a many changes in order to run create_simplified_H efficently
May 07, 2021: 0.45a -> 0.45b if Premodel analysis failed, return 15 values
May 08, 2021: 0.45b -> 0.45c changed that images of same size do not crash out_images creation
May 14, 2021: 0.45c -> 0.45d create_parInit, changed from <> to <= and >=
May 18, 2021: 0.45d -> 0.45e testing focus constrain in Tokovinin
May 19, 2021: 0.45e -> 0.45f expanded verbosity messages in Tokovinin algorithm
May 19, 2021: 0.45f -> 0.45g testing [8., 8., 8., 8., 1., 8., 8., 8., 8.] renormalization
May 20, 2021: 0.45g -> 0.45h do not use multi_background for image in or near focus
May 27, 2021: 0.45h -> 0.45i reordered variables in LN_PFS_single, in preparation for wv analysis
May 27, 2021: 0.45i -> 0.46 changed oversampling to be always 10
Jun 08, 2021: 0.46 -> 0.46a changed to Psf_position to be able to take only_chi and center of flux
Jun 08, 2021: 0.46a -> 0.46b changed normalization so that in focus it is identical to the pipeline
Jun 15, 2021: 0.46b -> 0.46c change limit on the initial cut of the oversampled image,
in order to handle bluer data
Jun 19, 2021: 0.46c -> 0.46d changed skimage.transform.resize to resize,
to avoid skimage.transform not being available in LSST
Jun 20, 2021: 0.46d -> 0.46e changed scipy.signal to signal,
and require that optPsf_cut_downsampled_scattered size is int /
no change to unit test
Jun 24, 2021: 0.46e -> 0.47 removed resize and introduced galsim resizing in Psf_position,
to be consistent with LSST pipeline
Jun 25, 2021: 0.47 -> 0.47a introduced galsim resizing in the first downsampling from natural resolution
to default=10 oversampling also
Jul 11, 2021: 0.47a -> 0.47b changed a minus factor in secondary position estimation
Jul 12, 2021: 0.47b -> 0.47c initial offset in positioning had a wrong +- sign in front
Jul 23, 2021: 0.47c -> 0.47d (only) added comments and explanations
Jul 26, 2021: 0.47d -> 0.47e changed default oversampling to 11
Jul 27, 2021: 0.47e -> 0.47f offset done in galsim, but downsampling via resize function
Aug 26, 2021: 0.47f -> 0.47g direct minimization when use_center_of_flux=True
Aug 30, 2021: 0.47g -> 0.48 offset done in LSST code now
Sep 02, 2021: 0.48 -> 0.48a done cleaning offset code (PIPE2D-880)
Sep 15, 2021: 0.48a -> 0.48b removed minor bug where array_of_var_sum was called too early,
and could fail if nan value was present
Sep 27, 2021: 0.48b -> 0.48c added explicit bool conversion to double_sources
Oct 05, 2021: 0.48c -> 0.48d further explicit bool(double_sources) conversion in ln_pfs_single
Oct 08, 2021: 0.48d -> 0.48e Pep8 cleaning
Oct 15, 2021: 0.48e -> 0.48f forced a randomseed number in create_parInit function
Oct 25, 2021: 0.48f -> 0.49 set half of init values in create_parInit to be same as init value
Oct 26, 2021: 0.49 -> 0.49a modified create_custom_var so that it does a linear fit if the 2nd-degree fit is convex
Oct 28, 2021: 0.49a -> 0.49b modified create_custom_var so that it does not fall below min(var) value
Nov 01, 2021: 0.49b -> 0.49c create_custom_var does not change var image from step to step anymore
Nov 02, 2021: 0.49c -> 0.49d eliminated std varianble from create_simplified_H
Nov 03, 2021: 0.49d -> 0.49e PIPE2D-930; fixed reusing list_of_variance in Tokovinin
Nov 03, 2021: 0.49e -> 0.50 PIPE2D-931; modified creation of polyfit for variance image higher up
so it is done only once per sci/var/mask image combination
Nov 20, 2021: 0.50 -> 0.50a Hilo modifications
Dec 06, 2021: 0.50a -> 0.51 Zernike_estimation_preparation class
Dec 09, 2021: 0.51 -> 0.51a introduced `fixed_single_spot`
Feb 11, 2022: 0.51a -> 0.51b unified index parameter allowed to vary
Mar 18, 2022: 0.51b -> 0.51c introduced div_same par, controlling how many particles are same
Mar 24, 2022: 0.51c -> 0.51d multiple small changes, for running same illum in fiber
Apr 03, 2022: 0.51d -> 0.51e test is now analysis_type_fiber == "fixed_fiber_par"
May 05, 2022: 0.51e -> 0.51f added documentation
May 09, 2022: 0.51f -> 0.51g replaced print with logging
May 24, 2022: 0.51g -> 0.51h small changes to output testing directory
May 26, 2022: 0.51h -> 0.51i linting fixes
Jun 01, 2022: 0.51i -> 0.52 im1.setCenter(0,0), to be compatible with galsim 2.3.4
@author: <NAME>
@contact: <EMAIL>
@web: www.ncaplar.com
"""
########################################
# standard library imports
# from __future__ import absolute_import, division, logging.info_function
from functools import partial
from typing import Tuple, Iterable
# import matplotlib
# from matplotlib.colors import LogNorm
# import matplotlib.pyplot as plt
import lmfit
from scipy.linalg import svd
from scipy import signal
from scipy.ndimage import gaussian_filter  # scipy.ndimage.filters is deprecated
import scipy.fftpack
import scipy.misc
from scipy.special import erf
from astropy.convolution import Gaussian2DKernel
from astropy.convolution import Tophat2DKernel
import lsst.afw.math
import lsst.afw.image
import lsst.afw
import lsst
import galsim
import traceback
# import platform
import threading
# from multiprocessing import current_process
import numpy as np
import os
import time
# import sys
import math
import socket
import sys
import pickle
import logging
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
np.set_printoptions(suppress=True)
np.seterr(divide='ignore', invalid='ignore')
# logging.info(np.__config__)
########################################
# Related third party imports
# none at the moment
########################################
# Local application/library specific imports
# galsim
galsim.GSParams.maximum_fft_size = 12000
# lsst
# astropy
# import astropy
# import astropy.convolution
# scipy
# import scipy
# import skimage.transform
# import scipy.optimize as optimize
# for svd_invert function
# lmfit
# matplotlib
# needed for resizing routines
# for distributing image creation in Tokovinin algorithm
########################################
__all__ = [
'PupilFactory',
'Pupil',
'ZernikeFitterPFS',
'LN_PFS_multi_same_spot',
'LN_PFS_single',
'LNP_PFS',
'find_centroid_of_flux',
'create_parInit',
'PFSPupilFactory',
'custom_fftconvolve',
'stepK',
'maxK',
'sky_scale',
'sky_size',
'remove_pupil_parameters_from_all_parameters',
'resize',
'_interval_overlap',
'svd_invert',
'Tokovinin_multi',
'find_centroid_of_flux',
'create_popt_for_custom_var',
'create_custom_var_from_popt',
'Zernike_estimation_preparation']
__version__ = "0.52"
# classes Pupil, PupilFactory and PFSPupilFactory have different form of documentation,
# compared to other classes as they have been imported from code written by <NAME>
class Pupil(object):
"""!Pupil obscuration function.
"""
def __init__(self, illuminated, size, scale):
"""!Construct a Pupil
@param[in] illuminated 2D numpy array indicating which parts of
the pupil plane are illuminated.
@param[in] size Size of pupil plane array in meters. Note
that this may be larger than the actual
diameter of the illuminated pupil to
accommodate zero-padding.
@param[in] scale Sampling interval of pupil plane array in
meters.
"""
self.illuminated = illuminated
self.size = size
self.scale = scale
class PupilFactory(object):
"""!Pupil obscuration function factory for use with Fourier optics.
Based on the code by <NAME>, developed for HSC camera
Contains functions that can create various obscurations in the camera
"""
def __init__(
self,
pupilSize,
npix,
input_angle,
detFrac,
strutFrac,
slitFrac,
slitFrac_dy,
x_fiber,
y_fiber,
effective_ilum_radius,
frd_sigma,
frd_lorentz_factor,
det_vert,
wide_0=0,
wide_23=0,
wide_43=0,
misalign=0,
verbosity=0):
"""Construct a PupilFactory.
Parameters
----------
pupilSize: `float`
Size of the exit pupil [m]
npix: `int`
Constructed Pupils will be npix x npix
input_angle: `float`
            Angle of the pupil (for all practical purposes fixed at np.pi/2)
detFrac: `float`
            Value determining how much of the exit pupil is obscured by the
            central obscuration (detector)
strutFrac: `float`
Value determining how much of the exit pupil is obscured
by a single strut
slitFrac: `float`
Value determining how much of the exit pupil is obscured by slit
slitFrac_dy: `float`
Value determining what is the vertical position of the slit
in the exit pupil
x_fiber: `float`
            Position of the fiber misalignment in the x direction
        y_fiber: `float`
            Position of the fiber misalignment in the y direction
effective_ilum_radius: `float`
Fraction of the maximal radius of the illumination
of the exit pupil that is actually illuminated
frd_sigma: `float`
Sigma of Gaussian convolving only outer edge, mimicking FRD
frd_lorentz_factor: `float`
Strength of the lorentzian factor describing wings
det_vert: `float`
Multiplicative factor determining vertical size
of the detector obscuration
wide_0: `float`
Widening of the strut at 0 degrees
wide_23: `float`
Widening of the strut at the top-left corner
wide_43: `float`
Widening of the strut at the bottom-left corner
misalign: `float`
            Describing the amount of misalignment
verbosity: `int`
How verbose during evaluation (1 = full verbosity)
"""
self.verbosity = verbosity
if self.verbosity == 1:
logging.info('Entering PupilFactory class')
self.pupilSize = pupilSize
self.npix = npix
self.input_angle = input_angle
self.detFrac = detFrac
self.strutFrac = strutFrac
self.pupilScale = pupilSize / npix
self.slitFrac = slitFrac
self.slitFrac_dy = slitFrac_dy
self.effective_ilum_radius = effective_ilum_radius
self.frd_sigma = frd_sigma
self.frd_lorentz_factor = frd_lorentz_factor
self.det_vert = det_vert
self.wide_0 = wide_0
self.wide_23 = wide_23
self.wide_43 = wide_43
self.misalign = misalign
u = (np.arange(npix, dtype=np.float32) - (npix - 1) / 2) * self.pupilScale
self.u, self.v = np.meshgrid(u, u)
@staticmethod
def _pointLineDistance(p0, p1, p2):
"""Compute the right-angle distance between the points given by `p0`
and the line that passes through `p1` and `p2`.
@param[in] p0 2-tuple of numpy arrays (x,y coords)
@param[in] p1 2-tuple of scalars (x,y coords)
@param[in] p2 2-tuple of scalars (x,y coords)
@returns numpy array of distances; shape congruent to p0[0]
"""
x0, y0 = p0
x1, y1 = p1
x2, y2 = p2
dy21 = y2 - y1
dx21 = x2 - x1
return np.abs(dy21 * x0 - dx21 * y0 + x2 * y1 - y2 * x1) / np.hypot(dy21, dx21)
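    # Illustrative check (not part of the original class): the distance from
    # the origin to the vertical line through (1, 0) and (1, 1) is 1, i.e.
    # PupilFactory._pointLineDistance((np.array([0.]), np.array([0.])),
    #                                 (1., 0.), (1., 1.)) -> array([1.])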
def _fullPupil(self):
"""Make a fully-illuminated Pupil.
@returns Pupil
"""
illuminated = np.ones(self.u.shape, dtype=np.float32)
return Pupil(illuminated, self.pupilSize, self.pupilScale)
def _cutCircleInterior(self, pupil, p0, r):
"""Cut out the interior of a circular region from a Pupil.
@param[in,out] pupil Pupil to modify in place
@param[in] p0 2-tuple indicating region center
@param[in] r Circular region radius
"""
r2 = (self.u - p0[0])**2 + (self.v - p0[1])**2
pupil.illuminated[r2 < r**2] = False
def _cutCircleExterior(self, pupil, p0, r):
"""Cut out the exterior of a circular region from a Pupil.
@param[in,out] pupil Pupil to modify in place
@param[in] p0 2-tuple indicating region center
@param[in] r Circular region radius
"""
r2 = (self.u - p0[0])**2 + (self.v - p0[1])**2
pupil.illuminated[r2 > r**2] = False
def _cutEllipseExterior(self, pupil, p0, r, b, thetarot):
"""Cut out the exterior of a circular region from a Pupil.
@param[in,out] pupil Pupil to modify in place
@param[in] p0 2-tuple indicating region center
@param[in] r Ellipse region radius = major axis
@param[in] b Ellipse region radius = minor axis
@param[in] thetarot Ellipse region rotation
"""
r2 = (self.u - p0[0])**2 + (self.v - p0[1])**2
theta = np.arctan(self.u / self.v) + thetarot
pupil.illuminated[r2 > r**2 * b**2 / (b**2 * (np.cos(theta))**2 + r**2 * (np.sin(theta))**2)] = False
def _cutSquare(self, pupil, p0, r, angle, det_vert):
"""Cut out the interior of a circular region from a Pupil.
@param[in,out] pupil Pupil to modify in place
@param[in] p0 2-tuple indicating region center
        @param[in] r half-length of the square side
@param[in] angle angle that the camera is rotated
@param[in] det_vert multiplicative factor that distorts the square into a rectangle
"""
pupil_illuminated_only1 = np.ones_like(pupil.illuminated, dtype=np.float32)
time_start_single_square = time.time()
###########################################################
# Central square
if det_vert is None:
det_vert = 1
x21 = -r / 2 * det_vert * 1
x22 = +r / 2 * det_vert * 1
y21 = -r / 2 * 1
y22 = +r / 2 * 1
i_max = self.npix / 2 - 0.5
i_min = -i_max
i_y_max = int(np.round((x22 + p0[1]) / self.pupilScale - (i_min)))
i_y_min = int(np.round((x21 + p0[1]) / self.pupilScale - (i_min)))
i_x_max = int(np.round((y22 + p0[0]) / self.pupilScale - (i_min)))
i_x_min = int(np.round((y21 + p0[0]) / self.pupilScale - (i_min)))
assert angle == np.pi / 2
# angleRad = angle
camX_value_for_f_multiplier = p0[0]
camY_value_for_f_multiplier = p0[1]
# logging.info(camX_value_for_f_multiplier,camY_value_for_f_multiplier)
camY_Max = 0.02
f_multiplier_factor = (-camX_value_for_f_multiplier * 100 / 3) * \
(np.abs(camY_value_for_f_multiplier) / camY_Max) + 1
# f_multiplier_factor=1
if self.verbosity == 1:
logging.info('f_multiplier_factor for size of detector triangle is: ' + str(f_multiplier_factor))
pupil_illuminated_only0_in_only1 = np.zeros((i_y_max - i_y_min, i_x_max - i_x_min))
u0 = self.u[i_y_min:i_y_max, i_x_min:i_x_max]
v0 = self.v[i_y_min:i_y_max, i_x_min:i_x_max]
        # Factor controlling how big the triangle in the corner of the detector is
f = 0.2
f_multiplier = f_multiplier_factor / 1
###########################################################
# Lower right corner
x21 = -r / 2
x22 = +r / 2
y21 = -r / 2 * det_vert
y22 = +r / 2 * det_vert
f_lr = np.copy(f) * (1 / f_multiplier)
angleRad21 = -np.pi / 4
triangle21 = [[p0[0] + x22, p0[1] + y21],
[p0[0] + x22, p0[1] + y21 - y21 * f_lr],
[p0[0] + x22 - x22 * f_lr, p0[1] + y21]]
p21 = triangle21[0]
y22 = (triangle21[1][1] - triangle21[0][1]) / np.sqrt(2)
y21 = 0
x21 = (triangle21[2][0] - triangle21[0][0]) / np.sqrt(2)
x22 = -(triangle21[2][0] - triangle21[0][0]) / np.sqrt(2)
pupil_illuminated_only0_in_only1[((v0 - p21[1]) * np.cos(-angleRad21)
- (u0 - p21[0]) * np.sin(-angleRad21) < y22)] = True
###########################################################
# Upper left corner
x21 = -r / 2 * 1
x22 = +r / 2 * 1
y21 = -r / 2 * det_vert
y22 = +r / 2 * det_vert
# angleRad12 = -np.pi / 4
f_ul = np.copy(f) * (1 / f_multiplier)
triangle12 = [[p0[0] + x21, p0[1] + y22],
[p0[0] + x21, p0[1] + y22 - y22 * f_ul],
[p0[0] + x21 - x21 * f_ul, p0[1] + y22]]
p21 = triangle12[0]
y22 = 0
y21 = (triangle12[1][1] - triangle12[0][1]) / np.sqrt(2)
x21 = -(triangle12[2][0] - triangle12[0][0]) / np.sqrt(2)
x22 = +(triangle12[2][0] - triangle12[0][0]) / np.sqrt(2)
pupil_illuminated_only0_in_only1[((v0 - p21[1]) * np.cos(-angleRad21)
- (u0 - p21[0]) * np.sin(-angleRad21) > y21)] = True
###########################################################
# Upper right corner
x21 = -r / 2 * 1
x22 = +r / 2 * 1
y21 = -r / 2 * det_vert
y22 = +r / 2 * det_vert
f_ur = np.copy(f) * f_multiplier
triangle22 = [[p0[0] + x22, p0[1] + y22],
[p0[0] + x22, p0[1] + y22 - y22 * f_ur],
[p0[0] + x22 - x22 * f_ur, p0[1] + y22]]
p21 = triangle22[0]
y22 = -0
y21 = +(triangle22[1][1] - triangle22[0][1]) / np.sqrt(2)
x21 = +(triangle22[2][0] - triangle22[0][0]) / np.sqrt(2)
x22 = -(triangle22[2][0] - triangle22[0][0]) / np.sqrt(2)
pupil_illuminated_only0_in_only1[((u0 - p21[0]) * np.cos(-angleRad21)
+ (v0 - p21[1]) * np.sin(-angleRad21) > x21)] = True
###########################################################
# Lower left corner
x21 = -r / 2 * 1
x22 = +r / 2 * 1
y21 = -r / 2 * det_vert
y22 = +r / 2 * det_vert
f_ll = np.copy(f) * f_multiplier
triangle11 = [[p0[0] + x21, p0[1] + y21],
[p0[0] + x21, p0[1] + y21 - y21 * f_ll],
[p0[0] + x21 - x21 * f_ll, p0[1] + y21]]
p21 = triangle11[0]
y22 = -(triangle11[1][1] - triangle11[0][1]) / np.sqrt(2)
y21 = 0
x21 = +(triangle11[2][0] - triangle11[0][0]) / np.sqrt(2)
x22 = +(triangle11[2][0] - triangle11[0][0]) / np.sqrt(2)
pupil_illuminated_only0_in_only1[((u0 - p21[0]) * np.cos(-angleRad21)
+ (v0 - p21[1]) * np.sin(-angleRad21) < x22)] = True
pupil_illuminated_only1[i_y_min:i_y_max, i_x_min:i_x_max] = pupil_illuminated_only0_in_only1
pupil.illuminated = pupil.illuminated * pupil_illuminated_only1
time_end_single_square = time.time()
if self.verbosity == 1:
logging.info('Time for cutting out the square is '
+ str(time_end_single_square - time_start_single_square))
def _cutRay(self, pupil, p0, angle, thickness, angleunit=None, wide=0):
"""Cut out a ray from a Pupil.
@param[in,out] pupil Pupil to modify in place
@param[in] p0 2-tuple indicating ray starting point
@param[in] angle Ray angle measured CCW from +x.
@param[in] thickness Thickness of cutout
@param[in] angleunit If None, changes internal units to radians
@param[in] wide Controls the widening of the strut as
a function of the distance from the origin
"""
if angleunit is None:
angleRad = angle.asRadians()
else:
angleRad = angle
# the 1 is arbitrary, just need something to define another point on
# the line
p1 = (p0[0] + 1, p0[1] + np.tan(angleRad))
d = PupilFactory._pointLineDistance((self.u, self.v), p0, p1)
radial_distance = 14.34 * np.sqrt((self.u - p0[0])**2 + (self.v - p0[1])**2)
pupil.illuminated[(d < 0.5 * thickness * (1 + wide * radial_distance))
& ((self.u - p0[0]) * np.cos(angleRad)
+ (self.v - p0[1]) * np.sin(angleRad) >= 0)] = False
def _addRay(self, pupil, p0, angle, thickness, angleunit=None):
"""Add a ray from a Pupil.
@param[in,out] pupil Pupil to modify in place
@param[in] p0 2-tuple indicating ray starting point
@param[in] angle Ray angle measured CCW from +x.
@param[in] thickness Thickness of cutout
"""
if angleunit is None:
angleRad = angle.asRadians()
else:
angleRad = angle
# the 1 is arbitrary, just need something to define another point on
# the line
p1 = (p0[0] + 1, p0[1] + np.tan(angleRad))
d = PupilFactory._pointLineDistance((self.u, self.v), p0, p1)
pupil.illuminated[(d < 0.5 * thickness)
& ((self.u - p0[0]) * np.cos(angleRad)
+ (self.v - p0[1]) * np.sin(angleRad) >= 0)] = True
class PFSPupilFactory(PupilFactory):
"""Pupil obscuration function factory for PFS
Based on the code by <NAME>, initially developed for HSC camera
Invokes PupilFactory to create obscurations of the camera
Adds various illumination effects which are specified to the spectrographs
"""
def __init__(
self,
pupilSize,
npix,
input_angle,
detFrac,
strutFrac,
slitFrac,
slitFrac_dy,
x_fiber,
y_fiber,
effective_ilum_radius,
frd_sigma,
frd_lorentz_factor,
det_vert,
slitHolder_frac_dx,
wide_0=0,
wide_23=0,
wide_43=0,
misalign=0,
verbosity=0):
"""!Construct a PupilFactory.
Parameters
----------
pupilSize: `float`
Size of the exit pupil [m]
npix: `int`
Constructed Pupils will be npix x npix
input_angle: `float`
            Angle of the pupil (for all practical purposes fixed at np.pi/2)
detFrac: `float`
            Value determining how much of the exit pupil is obscured by the
            central obscuration (detector)
strutFrac: `float`
Value determining how much of the exit pupil is obscured
by a single strut
slitFrac: `float`
Value determining how much of the exit pupil is obscured by slit
slitFrac_dy: `float`
Value determining what is the vertical position of the slit
in the exit pupil
x_fiber: `float`
            Position of the fiber misalignment in the x direction
        y_fiber: `float`
            Position of the fiber misalignment in the y direction
effective_ilum_radius: `float`
Fraction of the maximal radius of the illumination
of the exit pupil that is actually illuminated
frd_sigma: `float`
Sigma of Gaussian convolving only outer edge, mimicking FRD
frd_lorentz_factor: `float`
Strength of the lorentzian factor describing wings
det_vert: `float`
Multiplicative factor determining vertical size
of the detector obscuration
wide_0: `float`
Widening of the strut at 0 degrees
wide_23: `float`
Widening of the strut at the top-left corner
wide_43: `float`
Widening of the strut at the bottom-left corner
misalign: `float`
            Describing the amount of misalignment
verbosity: `int`
How verbose during evaluation (1 = full verbosity)
"""
self.verbosity = verbosity
if self.verbosity == 1:
logging.info('Entering PFSPupilFactory class')
PupilFactory.__init__(
self,
pupilSize,
npix,
input_angle,
detFrac,
strutFrac,
slitFrac,
slitFrac_dy,
x_fiber,
y_fiber,
effective_ilum_radius,
frd_sigma,
frd_lorentz_factor,
det_vert,
verbosity=self.verbosity,
wide_0=wide_0,
wide_23=wide_23,
wide_43=wide_43,
misalign=misalign)
self.x_fiber = x_fiber
self.y_fiber = y_fiber
self.slitHolder_frac_dx = slitHolder_frac_dx
self._spiderStartPos = [np.array([0., 0.]), np.array([0., 0.]), np.array([0., 0.])]
self._spiderAngles = [0, np.pi * 2 / 3, np.pi * 4 / 3]
self.effective_ilum_radius = effective_ilum_radius
self.wide_0 = wide_0
self.wide_23 = wide_23
self.wide_43 = wide_43
self.misalign = misalign
def getPupil(self, point):
"""!Calculate a Pupil at a given point in the focal plane.
@param point Point2D indicating focal plane coordinates.
@returns Pupil
"""
if self.verbosity == 1:
logging.info('Entering getPupil (function inside PFSPupilFactory)')
# called subaruRadius as it was taken from the code fitting pupil for HSC on Subaru
subaruRadius = (self.pupilSize / 2) * 1
detFrac = self.detFrac # linear fraction
hscRadius = detFrac * subaruRadius
slitFrac = self.slitFrac # linear fraction
subaruSlit = slitFrac * subaruRadius
strutFrac = self.strutFrac # linear fraction
subaruStrutThick = strutFrac * subaruRadius
# y-position of the slit
slitFrac_dy = self.slitFrac_dy
# relic from the HSC code
# See DM-8589 for more detailed description of following parameters
# d(lensCenter)/d(theta) in meters per degree
# lensRate = 0.0276 * 3600 / 128.9 * subaruRadius
# d(cameraCenter)/d(theta) in meters per degree
hscRate = 2.62 / 1000 * subaruRadius
hscPlateScale = 380
thetaX = point[0] * hscPlateScale
thetaY = point[1] * hscPlateScale
pupil = self._fullPupil()
camX = thetaX * hscRate
camY = thetaY * hscRate
# creating FRD effects
single_element = np.linspace(-1, 1, len(pupil.illuminated), endpoint=True, dtype=np.float32)
u_manual = np.tile(single_element, (len(single_element), 1))
v_manual = np.transpose(u_manual)
center_distance = np.sqrt((u_manual - self.x_fiber * hscRate * hscPlateScale * 12)
** 2 + (v_manual - self.y_fiber * hscRate * hscPlateScale * 12)**2)
frd_sigma = self.frd_sigma
sigma = 2 * frd_sigma
pupil_frd = (1 / 2 * (scipy.special.erf((-center_distance + self.effective_ilum_radius) / sigma)
+ scipy.special.erf((center_distance + self.effective_ilum_radius) / sigma)))
################
# Adding misalignment in this section
time_misalign_start = time.time()
position_of_center_0 = np.where(center_distance == np.min(center_distance))
position_of_center = [position_of_center_0[1][0], position_of_center_0[0][0]]
position_of_center_0_x = position_of_center_0[0][0]
position_of_center_0_y = position_of_center_0[1][0]
distances_to_corners = np.array([np.sqrt(position_of_center[0]**2 + position_of_center[1]**2),
np.sqrt((len(pupil_frd) - position_of_center[0])**2
+ position_of_center[1]**2),
np.sqrt((position_of_center[0])**2
+ (len(pupil_frd) - position_of_center[1])**2),
np.sqrt((len(pupil_frd) - position_of_center[0])**2
+ (len(pupil_frd) - position_of_center[1])**2)])
max_distance_to_corner = np.max(distances_to_corners)
threshold_value = 0.5
left_from_center = np.where(pupil_frd[position_of_center_0_x]
[0:position_of_center_0_y] < threshold_value)[0]
right_from_center = \
np.where(pupil_frd[position_of_center_0_x][position_of_center_0_y:] < threshold_value)[0] +\
position_of_center_0_y
up_from_center = \
np.where(pupil_frd[:, position_of_center_0_y][position_of_center_0_x:] < threshold_value)[0] +\
position_of_center_0_x
down_from_center = np.where(pupil_frd[:, position_of_center_0_y]
[:position_of_center_0_x] < threshold_value)[0]
if len(left_from_center) > 0:
size_of_05_left = position_of_center_0_y - np.max(left_from_center)
else:
size_of_05_left = 0
if len(right_from_center) > 0:
size_of_05_right = np.min(right_from_center) - position_of_center_0_y
else:
size_of_05_right = 0
if len(up_from_center) > 0:
size_of_05_up = np.min(up_from_center) - position_of_center_0_x
else:
size_of_05_up = 0
if len(down_from_center) > 0:
size_of_05_down = position_of_center_0_x - np.max(down_from_center)
else:
size_of_05_down = 0
sizes_4_directions = np.array([size_of_05_left, size_of_05_right, size_of_05_up, size_of_05_down])
max_size = np.max(sizes_4_directions)
imageradius = max_size
radiusvalues = np.linspace(
0, int(
np.ceil(max_distance_to_corner)), int(
np.ceil(max_distance_to_corner)) + 1)
sigtotp = sigma * 550
dif_due_to_mis_class = Pupil_misalign(radiusvalues, imageradius, sigtotp, self.misalign)
dif_due_to_mis = dif_due_to_mis_class()
scaling_factor_pixel_to_physical = max_distance_to_corner / np.max(center_distance)
distance_int = np.round(center_distance * scaling_factor_pixel_to_physical).astype(int)
pupil_frd_with_mis = pupil_frd + dif_due_to_mis[distance_int]
pupil_frd_with_mis[pupil_frd_with_mis > 1] = 1
time_misalign_end = time.time()
if self.verbosity == 1:
logging.info('Time to execute illumination considerations due to misalignment '
+ str(time_misalign_end - time_misalign_start))
####
pupil_lorentz = (np.arctan(2 * (self.effective_ilum_radius - center_distance) / (4 * sigma))
+ np.arctan(2 * (self.effective_ilum_radius + center_distance) / (4 * sigma))) /\
(2 * np.arctan((2 * self.effective_ilum_radius) / (4 * sigma)))
pupil_frd = np.copy(pupil_frd_with_mis)
pupil.illuminated = (pupil_frd + 1 * self.frd_lorentz_factor
* pupil_lorentz) / (1 + self.frd_lorentz_factor)
# Cut out the acceptance angle of the camera
self._cutCircleExterior(pupil, (0.0, 0.0), subaruRadius)
# Cut out detector shadow
self._cutSquare(pupil, (camX, camY), hscRadius, self.input_angle, self.det_vert)
# No vignetting of this kind for the spectroscopic camera
# self._cutCircleExterior(pupil, (lensX, lensY), lensRadius)
# Cut out spider shadow
for pos, angle in zip(self._spiderStartPos, self._spiderAngles):
x = pos[0] + camX
y = pos[1] + camY
if angle == 0:
# logging.info('cutRay applied to strut at angle '+str(angle))
self._cutRay(pupil, (x, y), angle, subaruStrutThick, 'rad', self.wide_0)
if angle == np.pi * 2 / 3:
# logging.info('cutRay applied to strut at angle '+str(angle))
self._cutRay(pupil, (x, y), angle, subaruStrutThick, 'rad', self.wide_23)
if angle == np.pi * 4 / 3:
# logging.info('cutRay applied to strut at angle '+str(angle))
self._cutRay(pupil, (x, y), angle, subaruStrutThick, 'rad', self.wide_43)
# cut out slit shadow
self._cutRay(pupil, (2, slitFrac_dy / 18), -np.pi, subaruSlit * 1.05, 'rad')
# cut out slit holder shadow
# subaruSlit/3 is roughly the width of the holder
self._cutRay(pupil, (self.slitHolder_frac_dx / 18, 1), -np.pi / 2, subaruSlit * 0.3, 'rad')
if self.verbosity == 1:
logging.info('Finished with getPupil')
return pupil
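# A minimal usage sketch of the factory above. The keyword values mirror the
# call in ZernikeFitterPFS._get_Pupil further below and the numbers are the
# initParams defaults; they are illustrative only, not a prescription.
# >>> factory = PFSPupilFactory(
# ...     pupilSize=139.5327e-3, npix=1536, input_angle=np.pi / 2,
# ...     detFrac=0.65, strutFrac=0.07, slitFrac=0.05, slitFrac_dy=0,
# ...     x_fiber=1, y_fiber=0, effective_ilum_radius=0.9,
# ...     frd_sigma=0.02, frd_lorentz_factor=0.5, det_vert=1,
# ...     slitHolder_frac_dx=0, wide_0=0, wide_23=0, wide_43=0,
# ...     misalign=0, verbosity=0)
# >>> pupil = factory.getPupil(point=[0.0, 0.0])
# >>> pupil.illuminated    # 2d array describing the pupil illumination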
class Pupil_misalign(object):
"""Apply misaligment correction to the illumination of the pupil
Developed by <NAME> (Caltech)
Copied here without modifications
"""
def __init__(self, radiusvalues, imageradius, sigtotp, misalign):
self.radiusvalues = radiusvalues
self.imageradius = imageradius
self.sigtotp = sigtotp
self.misalign = misalign
def wapp(self, A):
# Approximation function by <NAME> to approximate and correct for the
# widening of the profile width due to the angular misalignment convolution. This
# is used to scale the relative contributions of angular misalignment and FRD
# A = angmis/sigFRD
wappA = np.sqrt(1 + A * A * (1 + A * A) / (2 + 1.5 * A * A))
return wappA
def fcorr(self, x, A):
# The function scaled so that it keeps the same (approximate) width value
# after angular convolution
correctedfam = self.fcon(x * self.wapp(A), A)
return correctedfam
def fcon(self, x, A):
# For more detail about this method, see "Analyzing Radial Profiles for FRD
# and Angular Misalignment", by <NAME>unn, 16/06/13.
wt = [0.1864, 0.1469, 0.1134, 0.1066, 0.1134, 0.1469, 0.1864] # from <NAME>'s white paper,
# wt contains the normalized integrals under the angular misalignment
# convolution kernel, i.e., C(1-(x/angmisp)^2)^{-1/2} for |x|<angmisp and 0
# elsewhere. Note that the edges' centers are at +/- a, so they are
# integrated over an effective half of the length of the others.
temp = np.zeros(np.size(x))
for index in range(7):
temp = temp + wt[index] * self.ndfc(x + (index - 3) / 3 * A)
angconvolved = temp
return angconvolved
def ndfc(self, x):
# Standard model dropoff from a Gaussian convolution, normalized to brightness 1,
# radius (rh) 0, and sigTOT 1
# logging.info(len(x))
ndfcfun = 1 - (0.5 * erf(x / np.sqrt(2)) + 0.5)
return ndfcfun
def FA(self, r, rh, sigTOT, A):
# Function that takes all significant variables of the dropoff and
# normalizes the curve to be comparable to ndfc
# r = vector of radius values, in steps of pixels
# rh = radius of half-intensity. Effectively the size of the radius of the dropoff
# sigTOT = total width of the convolution kernel that recreates the width of the dropoff
# between 85% and 15% illumination. Effectively just think of this as sigma
# A = angmis/sigFRD, that is, the ratio between the angular misalignment
# and the sigma due to only FRD. Usually this is on the order of 1-3.
FitwithAngle = self.fcorr((r - rh) / sigTOT, A)
return FitwithAngle
def __call__(self):
no_mis = self.FA(self.radiusvalues, self.imageradius, self.sigtotp, 0)
with_mis = self.FA(self.radiusvalues, self.imageradius, self.sigtotp, self.misalign)
dif_due_to_mis = with_mis - no_mis
return dif_due_to_mis
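# A minimal sketch of how getPupil above uses this class (numbers are
# illustrative only): the instance is simply called with no arguments and
# returns the illumination difference caused by the misalignment, which is
# then indexed by the integer radial distance of each pupil pixel.
# >>> radiusvalues = np.linspace(0, 200, 201)
# >>> dif_due_to_mis = Pupil_misalign(radiusvalues, imageradius=80,
# ...                                 sigtotp=22, misalign=2)()
# >>> dif_due_to_mis.shape    # same length as radiusvalues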
class ZernikeFitterPFS(object):
"""Create a model images for PFS
Despite its name, it does not actually ``fits'' the paramters describing the donuts,
it ``just'' creates the images
The final image is made by the convolution of
1. an OpticalPSF (constructed using FFT)
2. an input fiber image
3. and other convolutions such as CCD charge diffusion
The OpticalPSF part includes
1.1. description of pupil
1.2. specification of an arbitrary number of zernike wavefront aberrations
This code uses lmfit to initialize the parameters.
Calls Psf_position
Calls the Pupil classes (PupilFactory and PFSPupilFactory)
Called by LN_PFS_Single (function constructModelImage_PFS_naturalResolution)
"""
def __init__(self, image=np.ones((20, 20)), image_var=np.ones((20, 20)),
image_mask=None, pixelScale=20.76, wavelength=794,
diam_sic=139.5327e-3, npix=1536, pupilExplicit=None,
wf_full_Image=None,
ilum_Image=None, dithering=1, save=None,
pupil_parameters=None, use_pupil_parameters=None, use_optPSF=None, use_wf_grid=None,
zmaxInit=None, extraZernike=None, simulation_00=None, verbosity=None,
double_sources=None, double_sources_positions_ratios=None, test_run=None,
explicit_psf_position=None, use_only_chi=False, use_center_of_flux=False,
PSF_DIRECTORY=None, *args):
"""
Parameters
----------
image: `np.array`, (N, N)
image that you wish to model
if you do not pass the image that you wish to compare,
the algorithm will default to creating a 20x20 image
that has a value of '1' everywhere
image_var: `np.array`, (N, N)
variance image
if you do not pass the variance image,
the algorithm will default to creating a 20x20 image
that has a value of '1' everywhere
image_mask: `np.array`, (N, N)
mask image
pixelScale: `float`
pixel scale in arcseconds
This is the size of the pixel in arcsec for the PFS red arm in focus,
calculated with http://www.wilmslowastro.com/software/formulae.htm
pixel size in microns / focal length in mm x 206.3
pixel size = 15 microns, focal length = 149.2 mm
(138 aperture x 1.1 f-number)
wavelength: `float`
wavelength of the psf [nm]
if you do not pass the value for wavelength it will default to 794 nm,
which is roughly in the middle of the red detector
diam_sic: `float`
size of the exit pupil [m]
Exit pupil size in focus, default is 139.5327e-3 meters
(taken from Zemax)
npix: `int`
size of the 2d array containing the exit pupil illumination
pupilExplicit: `np.array`, (Np, Np)
if available, uses this image for the pupil instead of
creating it from the supplied parameters
wf_full_Image: `np.array`, (Np, Np)
wavefront image
if available, uses this image for the wavefront instead of
creating it from the supplied parameters
dithering: `int`
dithering scale (most likely 1 or 2)
save: `int`
if 1, save various intermediate results for testing purposes
(also requires PSF_DIRECTORY to be set)
use_optPSF: `np.array`, (Np, Np)
if provided, skip creation of the optical psf and only do postprocessing
use_wf_grid: `np.array`, (Ny, Nx)
if provided, use this explicit wavefront map
zmaxInit: `int`
highest Zernike order (11 or 22)
extraZernike: `np.array`, (N)
if provided, simulates Zernike orders higher than 22
simulation_00: `np.array`, (2,)
places optical center at the center of the final image
verbosity: `int`
verbosity during evaluations
double_sources:
is there a second source present in the image
double_sources_positions_ratios: `np.array`, (2,)
initial guess for the position and strength of the second source
explicit_psf_position: `np.array`, (2,)
explicit position where to place optical psf
use_only_chi: `bool`
if True, fit to minimize np.abs(chi), and not chi**2
use_center_of_flux: `bool`
if True, fit to minimize the distance between the center of flux
for the model and the input image
PSF_DIRECTORY: `str`
where will intermediate outputs be saved for testing purposes
Notes
----------
Creates a model image that is fitted to the input science image
The model image is made by the convolution of
1. an OpticalPSF (constructed using FFT)
created with _getOptPsf_naturalResolution
The OpticalPSF part includes
1.1. description of pupil
created with get_Pupil
1.2. specification of an arbitrary number of
zernike wavefront aberrations,
which are input to galsim.phase_screens.OpticalScreen
2. an input fiber image and other convolutions such as
CCD charge diffusion created with _optPsf_postprocessing
This code uses lmfit to initialize the parameters.
Calls class PsfPosition
Calls class PFSPupilFactory
Examples
----------
Simple example with initial parameters, changing only one parameter
>>> zmax = 22
>>> single_image_analysis = ZernikeFitterPFS(zmaxInit = zmax,
verbosity=1)
>>> single_image_analysis.initParams()
>>> single_image_analysis.params['detFrac'] =\
lmfit.Parameter(name='detFrac', value=0.70)
>>> resulting_image, psf_pos =\
single_image_analysis.constructModelImage_PFS_naturalResolution()
"""
self.image = image
self.image_var = image_var
if image_mask is None:
image_mask = np.zeros(image.shape)
self.image_mask = image_mask
self.wavelength = wavelength
self.diam_sic = diam_sic
self.npix = npix
self.dithering = dithering
self.pixelScale = pixelScale
self.pixelScale_effective = self.pixelScale / dithering
if save in (None, 0):
save = None
else:
save = 1
self.save = save
self.use_optPSF = use_optPSF
# pupilExplicit can be used to pass the image of the pupil explicitly,
# instead of creating it from the supplied parameters
if pupilExplicit is None:
pupilExplicit = False
self.pupilExplicit = pupilExplicit
self.pupil_parameters = pupil_parameters
self.use_pupil_parameters = use_pupil_parameters
self.args = args
self.use_wf_grid = use_wf_grid
self.zmax = zmaxInit
self.simulation_00 = simulation_00
if self.simulation_00:
self.simulation_00 = 1
self.extraZernike = extraZernike
self.verbosity = verbosity
self.double_sources = double_sources
self.double_sources_positions_ratios = double_sources_positions_ratios
self.test_run = test_run
self.explicit_psf_position = explicit_psf_position
self.use_only_chi = use_only_chi
self.use_center_of_flux = use_center_of_flux
self.flux = float(np.sum(image))
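# explicit_psf_position can be an array, whose truth value is ambiguous;
# the try/except below keeps the supplied value in that case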
try:
if not explicit_psf_position:
self.explicit_psf_position = None
except BaseException:
pass
self.PSF_DIRECTORY = PSF_DIRECTORY
############################################################
if self.PSF_DIRECTORY is None:
# names of default directories where I often work
if socket.gethostname() == 'IapetusUSA':
self.PSF_DIRECTORY = '/Volumes/Saturn_USA/PFS/'
elif socket.gethostname() == 'pfsa-usr01-gb.subaru.nao.ac.jp' or \
socket.gethostname() == 'pfsa-usr02-gb.subaru.nao.ac.jp':
self.PSF_DIRECTORY = '/work/ncaplar/'
else:
self.PSF_DIRECTORY = '/tigress/ncaplar/PFS/'
if self.PSF_DIRECTORY is not None:
self.TESTING_FOLDER = self.PSF_DIRECTORY + 'Testing/'
self.TESTING_PUPIL_IMAGES_FOLDER = self.TESTING_FOLDER + 'Pupil_Images/'
self.TESTING_WAVEFRONT_IMAGES_FOLDER = self.TESTING_FOLDER + 'Wavefront_Images/'
self.TESTING_FINAL_IMAGES_FOLDER = self.TESTING_FOLDER + 'Final_Images/'
if self.verbosity == 1:
# check the versions of the most important libraries
logging.info('np.__version__: ' + str(np.__version__))
logging.info('scipy.__version__: ' + str(scipy.__version__))
def initParams(
self,
z4Init=None,
detFracInit=None,
strutFracInit=None,
focalPlanePositionInit=None,
slitFracInit=None,
slitFrac_dy_Init=None,
wide_0Init=None,
wide_23Init=None,
wide_43Init=None,
radiometricEffectInit=None,
radiometricExponentInit=None,
x_ilumInit=None,
y_ilumInit=None,
pixel_effectInit=None,
backgroundInit=None,
x_fiberInit=None,
y_fiberInit=None,
effective_ilum_radiusInit=None,
frd_sigmaInit=None,
frd_lorentz_factorInit=None,
misalignInit=None,
det_vertInit=None,
slitHolder_frac_dxInit=None,
grating_linesInit=None,
scattering_slopeInit=None,
scattering_amplitudeInit=None,
fiber_rInit=None,
fluxInit=None):
"""Initialize lmfit Parameters object.
Allows one to set up all parameters describing the pupil and
the Zernike parameters (up to z22) explicitly. If any value is not passed,
it will be substituted by a default value (specified below).
Parameters
----------
zmax: `int`
Total number of Zernike aberrations used (11 or 22)
Possible to add more with extra_zernike parameter
z4Init: `float`
Initial Z4 aberration value in waves (that is 2*np.pi*wavelengths)
# pupil parameters
detFracInit: `float`
Value determining how much of the exit pupil obscured by the
central obscuration(detector)
strutFracInit: `float`
Value determining how much of the exit pupil is obscured
by a single strut
focalPlanePositionInit: (`float`, `float`)
2-tuple for position of the central obscuration(detector)
in the focal plane
slitFracInit: `float`
Value determining how much of the exit pupil is obscured by slit
slitFrac_dy_Init: `float`
Value determining what is the vertical position of the slit
in the exit pupil
# parameters describing individual struts
wide_0Init: `float`
Parameter describing widening of the strut at 0 degrees
wide_23Init: `float`
Parameter describing widening of the top-left strut
wide_43Init: `float`
Parameter describing widening of the bottom-left strut
#non-uniform illumination
radiometricEffectInit: `float`
parameter describing non-uniform illumination of the pupil
(1-params['radiometricEffect']**2*r**2)**\
(params['radiometricExponent']) [DEPRECATED]
radiometricExponentInit: `float`
parameter describing non-uniform illumination of the pupil
(1-params['radiometricEffect']**2*r**2)\
**(params['radiometricExponent'])
x_ilumInit: `float`
x-position of the center of illumination
of the exit pupil [DEPRECATED]
y_ilumInit: `float`
y-position of the center of illumination
of the exit pupil [DEPRECATED]
# illumination due to fiber, parameters
x_fiberInit: `float`
position of the fiber misaligment in the x direction
y_fiberInit: `float`
position of the fiber misaligment in the y direction
effective_ilum_radiusInit: `float`
fraction of the maximal radius of the illumination
of the exit pupil that is actually illuminated
frd_sigmaInit: `float`
sigma of Gaussian convolving only outer edge, mimicking FRD
frd_lorentz_factorInit: `float`
strength of the Lorentzian factor describing the wings
of the pupil illumination
misalignInit: `float`
amount of misalignment in the illumination
# further pupil parameters
det_vertInit: `float`
multiplicative factor determining the vertical size
of the detector obscuration
slitHolder_frac_dxInit: `float`
dx position of the slit holder
# convolving (postprocessing) parameters
grating_linesInit: `int`
number of effective lines in the grating
scattering_slopeInit: `float`
slope of scattering
scattering_amplitudeInit: `float`
amplitude of scattering compared to optical PSF
pixel_effectInit: `float`
sigma describing charge diffusion effect [in units of 15 microns]
fiber_rInit: `float`
radius of perfect tophat fiber, as seen on the detector
[in units of 15 microns]
fluxInit: `float`
total flux in generated image compared to input image
(needs to be 1 or very close to 1)
"""
if self.verbosity == 1:
logging.info(' ')
logging.info('Initializing ZernikeFitterPFS')
logging.info('Verbosity parameter is: ' + str(self.verbosity))
logging.info('Highest Zernike polynomial is (zmax): ' + str(self.zmax))
params = lmfit.Parameters()
# Zernike parameters
z_array = []
if z4Init is None:
params.add('z4', 0.0)
else:
params.add('z4', z4Init)
for i in range(5, self.zmax + 1):
params.add('z{}'.format(i), 0.0)
# pupil parameters
if detFracInit is None:
params.add('detFrac', 0.65)
else:
params.add('detFrac', detFracInit)
if strutFracInit is None:
params.add('strutFrac', 0.07)
else:
params.add('strutFrac', strutFracInit)
if focalPlanePositionInit is None:
params.add('dxFocal', 0.0)
params.add('dyFocal', 0.0)
else:
params.add('dxFocal', focalPlanePositionInit[0])
params.add('dyFocal', focalPlanePositionInit[1])
if slitFracInit is None:
params.add('slitFrac', 0.05)
else:
params.add('slitFrac', slitFracInit)
if slitFrac_dy_Init is None:
params.add('slitFrac_dy', 0)
else:
params.add('slitFrac_dy', slitFrac_dy_Init)
# parameters describing individual struts
if wide_0Init is None:
params.add('wide_0', 0)
else:
params.add('wide_0', wide_0Init)
if wide_23Init is None:
params.add('wide_23', 0)
else:
params.add('wide_23', wide_23Init)
if wide_43Init is None:
params.add('wide_43', 0)
else:
params.add('wide_43', wide_43Init)
# non-uniform illumination
if radiometricExponentInit is None:
params.add('radiometricExponent', 0.25)
else:
params.add('radiometricExponent', radiometricExponentInit)
if radiometricEffectInit is None:
params.add('radiometricEffect', 0)
else:
params.add('radiometricEffect', radiometricEffectInit)
if x_ilumInit is None:
params.add('x_ilum', 1)
else:
params.add('x_ilum', x_ilumInit)
if y_ilumInit is None:
params.add('y_ilum', 1)
else:
params.add('y_ilum', y_ilumInit)
# illumination due to fiber, parameters
if x_fiberInit is None:
params.add('x_fiber', 1)
else:
params.add('x_fiber', x_fiberInit)
if y_fiberInit is None:
params.add('y_fiber', 0)
else:
params.add('y_fiber', y_fiberInit)
if effective_ilum_radiusInit is None:
params.add('effective_ilum_radius', 0.9)
else:
params.add('effective_ilum_radius', effective_ilum_radiusInit)
if frd_sigmaInit is None:
params.add('frd_sigma', 0.02)
else:
params.add('frd_sigma', frd_sigmaInit)
if frd_lorentz_factorInit is None:
params.add('frd_lorentz_factor', 0.5)
else:
params.add('frd_lorentz_factor', frd_lorentz_factorInit)
if misalignInit is None:
params.add('misalign', 0)
else:
params.add('misalign', misalignInit)
# further pupil parameters
if det_vertInit is None:
params.add('det_vert', 1)
else:
params.add('det_vert', det_vertInit)
if slitHolder_frac_dxInit is None:
params.add('slitHolder_frac_dx', 0)
else:
params.add('slitHolder_frac_dx', slitHolder_frac_dxInit)
# convolving (postprocessing) parameters
if grating_linesInit is None:
params.add('grating_lines', 100000)
else:
params.add('grating_lines', grating_linesInit)
if scattering_slopeInit is None:
params.add('scattering_slope', 2)
else:
params.add('scattering_slope', scattering_slopeInit)
if scattering_amplitudeInit is None:
params.add('scattering_amplitude', 10**-2)
else:
params.add('scattering_amplitude', scattering_amplitudeInit)
if pixel_effectInit is None:
params.add('pixel_effect', 0.35)
else:
params.add('pixel_effect', pixel_effectInit)
if fiber_rInit is None:
params.add('fiber_r', 1.8)
else:
params.add('fiber_r', fiber_rInit)
if fluxInit is None:
params.add('flux', 1)
else:
params.add('flux', fluxInit)
self.params = params
self.optPsf = None
self.z_array = z_array
def constructModelImage_PFS_naturalResolution(
self,
params=None,
shape=None,
pixelScale=None,
use_optPSF=None,
extraZernike=None,
return_intermediate_images=False):
"""Construct model image given the set of parameters
Parameters
----------
params : `lmfit.Parameters` object or python dictionary
Parameters describing model; None to use self.params
shape : `(int, int)`
Shape for model image; None to use the shape of self.image
pixelScale : `float`
Pixel scale in arcseconds to use for model image;
None to use self.pixelScale.
use_optPSF : `bool`
If True, use previously generated optical PSF,
skip _getOptPsf_naturalResolution, and conduct only postprocessing
extraZernike : `np.array`, (N,)
Zernike parameters beyond z22
return_intermediate_images : `bool`
If True, return intermediate images created during the run
This is in order to help with debugging and inspect
the images created during the process
Returns
----------
(if not return_intermediate_images)
optPsf_final : `np.array`, (N, N)
Final model image
psf_position : np.array, (2,)
Position where image is centered
(if return_intermediate_images)
optPsf_final : `np.array`, (N, N)
Final model image
ilum : `np.array`, (N, N)
Illumination array
wf_grid_rot : `np.array`, (N, N)
Wavefront array
psf_position : np.array, (2,)
Position where image is centered
Notes
----------
Calls _getOptPsf_naturalResolution and _optPsf_postprocessing
"""
if self.verbosity == 1:
logging.info(' ')
logging.info('Entering constructModelImage_PFS_naturalResolution')
if params is None:
params = self.params
if shape is None:
shape = self.image.shape
if pixelScale is None:
pixelScale = self.pixelScale
logging.info('pixelScale: ' + str(pixelScale))
try:
parameter_values = params.valuesdict()
except AttributeError:
parameter_values = params
use_optPSF = self.use_optPSF
if extraZernike is None:
pass
else:
extraZernike = list(extraZernike)
self.extraZernike = extraZernike
# if you did not pass pure optical psf image, create one here
if use_optPSF is None:
# change outputs depending on if you want intermediate results
if not return_intermediate_images:
optPsf = self._getOptPsf_naturalResolution(
parameter_values, return_intermediate_images=return_intermediate_images)
else:
optPsf, ilum, wf_grid_rot = self._getOptPsf_naturalResolution(
parameter_values, return_intermediate_images=return_intermediate_images)
else:
# if you claimed to have supplied optical psf image,
# but none is provided still create one
if self.optPsf is None:
if not return_intermediate_images:
optPsf = self._getOptPsf_naturalResolution(
parameter_values, return_intermediate_images=return_intermediate_images)
else:
optPsf, ilum, wf_grid_rot = self._getOptPsf_naturalResolution(
parameter_values, return_intermediate_images=return_intermediate_images)
self.optPsf = optPsf
else:
optPsf = self.optPsf
# at the moment, no difference in optPsf_postprocessing depending on return_intermediate_images
optPsf_final, psf_position = self._optPsf_postprocessing(
optPsf, return_intermediate_images=return_intermediate_images)
if self.save == 1:
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf', optPsf)
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_final', optPsf_final)
else:
pass
if self.verbosity == 1:
logging.info('Finished with constructModelImage_PFS_naturalResolution')
logging.info(' ')
if not return_intermediate_images:
return optPsf_final, psf_position
if return_intermediate_images:
return optPsf_final, ilum, wf_grid_rot, psf_position
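# A minimal sketch of requesting intermediate images (assumes initParams has
# already been called, as in the class-level example above; running this
# performs the full FFT calculation):
# >>> fitter = ZernikeFitterPFS(zmaxInit=22, verbosity=0)
# >>> fitter.initParams()
# >>> optPsf_final, ilum, wf_grid_rot, psf_position = \
# ...     fitter.constructModelImage_PFS_naturalResolution(
# ...         return_intermediate_images=True)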
def _optPsf_postprocessing(self, optPsf, return_intermediate_images=False):
"""Apply postprocessing to the pure optical psf image
Parameters
----------
optPsf : `np.array`, (N, N)
Optical image, only psf
return_intermediate_images : `bool`
If True, return intermediate images created during the run
This is potentially in order to help with debugging and inspect
the images created during the process
Returns
----------
(At the moment, the output is the same no matter what
return_intermediate_images is, but there is a possibility
to add intermediate outputs)
optPsf_final : `np.array`, (N, N)
Final model image
psf_position : `np.array`, (2,)
Position where the image is centered
Notes
----------
Takes optical psf and ``postprocesses`` it to generate final image.
The algorithm first reduces the oversampling and cuts the central part
of the image. This is done to speed up the calculations.
Then we apply various effects that are separate from
the pure optical PSF considerations.
We then finish with the centering algorithm to move our created image
to fit the input science image, invoking the PsfPosition class.
The effects we apply are
1. scattered light
function apply_scattered_light
2. convolution with fiber
function convolve_with_fiber
3. CCD diffusion
function convolve_with_CCD_diffusion
4. grating effects
function convolve_with_grating
5. centering
via class PsfPosition
"""
time_start_single = time.time()
if self.verbosity == 1:
logging.info(' ')
logging.info('Entering optPsf_postprocessing')
params = self.params
shape = self.image.shape
# all of the parameters for the creation of the image
param_values = params.valuesdict()
# how much is my generated image oversampled compared to final image
oversampling_original = (self.pixelScale_effective) / self.scale_ModelImage_PFS_naturalResolution
if self.verbosity == 1:
logging.info('Shape of optPsf: ' + str(optPsf.shape))
logging.info('Value of oversampling_original: ' + str(oversampling_original))
# determine the size, so that from the huge generated image we can cut out
# only the central portion (1.4 times larger than the size of actual
# image)
size_of_central_cut = int(oversampling_original * self.image.shape[0] * 1.4)
if size_of_central_cut > optPsf.shape[0]:
# if larger than size of image, cut the image
# fail if not enough space
size_of_central_cut = optPsf.shape[0]
if self.verbosity == 1:
logging.info('size_of_central_cut modified to ' + str(size_of_central_cut))
assert int(oversampling_original * self.image.shape[0] * 1.0) < optPsf.shape[0]
assert size_of_central_cut <= optPsf.shape[0]
if self.verbosity == 1:
logging.info('size_of_central_cut: ' + str(size_of_central_cut))
# cut part which you need to form the final image
# set oversampling to 1 so you are not resizing the image, and dx=0 and
# dy=0 so that you are not moving around, i.e., you are cutting the
# central region
optPsf_cut = PsfPosition.cut_Centroid_of_natural_resolution_image(
image=optPsf, size_natural_resolution=size_of_central_cut + 1, oversampling=1, dx=0, dy=0)
if self.verbosity == 1:
logging.info('optPsf_cut.shape' + str(optPsf_cut.shape))
# we want to reduce the oversampling to be roughly around 10 to make things computationally easier
# if oversampling_original is smaller than 20 (in case of dithered images),
# make the resolution coarser by a factor of 2
# otherwise set it to 11
if oversampling_original < 20:
oversampling = np.round(oversampling_original / 2)
else:
oversampling = 11
if self.verbosity == 1:
logging.info('oversampling:' + str(oversampling))
# what will be the size of the image after you resize it from the
# ``oversampling_original'' to the ``oversampling'' ratio
size_of_optPsf_cut_downsampled = int(
np.round(size_of_central_cut / (oversampling_original / oversampling)))
if self.verbosity == 1:
logging.info('size_of_optPsf_cut_downsampled: ' + str(size_of_optPsf_cut_downsampled))
# make sure that optPsf_cut_downsampled is an array which has an odd size
# - increase size by 1 if needed
im1 = galsim.Image(optPsf_cut, copy=True, scale=1)
im1.setCenter(0, 0)
interpolated_image = galsim._InterpolatedImage(im1, x_interpolant=galsim.Lanczos(5, True))
if (size_of_optPsf_cut_downsampled % 2) == 0:
size_of_optPsf_cut_downsampled += 1
optPsf_cut_downsampled = interpolated_image.\
drawImage(nx=size_of_optPsf_cut_downsampled, ny=size_of_optPsf_cut_downsampled,
scale=(oversampling_original / oversampling), method='no_pixel').array
if self.verbosity == 1:
logging.info('optPsf_cut_downsampled.shape: ' + str(optPsf_cut_downsampled.shape))
# gives middle point of the image to used for calculations of scattered light
# mid_point_of_optPsf_cut_downsampled = int(optPsf_cut_downsampled.shape[0] / 2)
# gives the size of one pixel in optPsf_downsampled in microns
# one physical pixel is 15 microns
# effective size is 15 / dithering
# size_of_pixels_in_optPsf_cut_downsampled = (15 / self.dithering) / oversampling
# size of the created optical PSF images in microns
# size_of_optPsf_cut_in_Microns = size_of_pixels_in_optPsf_cut_downsampled * \
# (optPsf_cut_downsampled.shape[0])
# if self.verbosity == 1:
# logging.info('size_of_optPsf_cut_in_Microns: ' + str(size_of_optPsf_cut_in_Microns))
if self.verbosity == 1:
logging.info('Postprocessing parameters are:')
logging.info(str(['grating_lines', 'scattering_slope', 'scattering_amplitude',
'pixel_effect', 'fiber_r']))
logging.info(str([param_values['grating_lines'], param_values['scattering_slope'],
param_values['scattering_amplitude'], param_values['pixel_effect'],
param_values['fiber_r']]))
##########################################
# 1. scattered light
optPsf_cut_downsampled_scattered = self.apply_scattered_light(optPsf_cut_downsampled,
oversampling,
param_values['scattering_slope'],
param_values['scattering_amplitude'],
dithering=self.dithering)
##########################################
# 2. convolution with fiber
optPsf_cut_fiber_convolved = self.convolve_with_fiber(optPsf_cut_downsampled_scattered,
oversampling,
param_values['fiber_r'],
dithering=self.dithering)
##########################################
# 3. CCD diffusion
optPsf_cut_pixel_response_convolved = self.convolve_with_CCD_diffusion(optPsf_cut_fiber_convolved,
oversampling,
param_values['pixel_effect'],
dithering=self.dithering)
##########################################
# 4. grating effects
optPsf_cut_grating_convolved = self.convolve_with_grating(optPsf_cut_pixel_response_convolved,
oversampling,
self.wavelength,
param_values['grating_lines'],
dithering=self.dithering)
##########################################
# 5. centering
# This is the part which creates the final image
# the algorithm finds the best downsampling combination automatically
if self.verbosity == 1:
logging.info('Are we invoking double sources (1 or True if yes): ' + str(self.double_sources))
logging.info('Double source position/ratio is:' + str(self.double_sources_positions_ratios))
# initialize the class which does the centering -
# TODO: the separation between the class and the main function in the class,
# ``find_single_realization_min_cut'', is a bit blurry and unsatisfactory
# this needs to be improved
single_Psf_position = PsfPosition(optPsf_cut_grating_convolved,
int(round(oversampling)),
shape[0],
simulation_00=self.simulation_00,
verbosity=self.verbosity,
save=self.save,
PSF_DIRECTORY=self.PSF_DIRECTORY)
time_end_single = time.time()
if self.verbosity == 1:
logging.info('Time for postprocessing up to single_Psf_position protocol is: '
+ str(time_end_single - time_start_single))
# run the code for centering
time_start_single = time.time()
optPsf_final, psf_position =\
single_Psf_position.find_single_realization_min_cut(optPsf_cut_grating_convolved,
int(round(oversampling)),
shape[0],
self.image,
self.image_var,
self.image_mask,
v_flux=param_values['flux'],
double_sources=self.double_sources,
double_sources_positions_ratios= # noqa: E251
self.double_sources_positions_ratios,
verbosity=self.verbosity,
explicit_psf_position= # noqa: E251
self.explicit_psf_position,
use_only_chi=self.use_only_chi,
use_center_of_flux=self.use_center_of_flux)
time_end_single = time.time()
if self.verbosity == 1:
logging.info('Time for single_Psf_position protocol is '
+ str(time_end_single - time_start_single))
if self.verbosity == 1:
logging.info('Successfully created optPsf_final')
if self.save == 1:
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_cut', optPsf_cut)
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_cut_downsampled', optPsf_cut_downsampled)
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_cut_downsampled_scattered',
optPsf_cut_downsampled_scattered)
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_cut_fiber_convolved',
optPsf_cut_fiber_convolved)
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_cut_pixel_response_convolved',
optPsf_cut_pixel_response_convolved)
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_cut_grating_convolved',
optPsf_cut_grating_convolved)
if self.verbosity == 1:
logging.info('Finished with optPsf_postprocessing')
logging.info(' ')
# TODO: at the moment, the output is the same but there is a possibility to add intermediate outputs
if not return_intermediate_images:
return optPsf_final, psf_position
if return_intermediate_images:
return optPsf_final, psf_position
def apply_scattered_light(self, image, oversampling,
scattering_slope, scattering_amplitude, dithering):
"""Add scattered light to optical psf
Parameters
----------
image : `np.array`, (N, N)
input image
oversampling: `int`
how oversampled is `image`
scattering_slope: `float`
slope of the scattered light
scattering_amplitude: `float`
amplitude of the scattered light
dithering: `int`
dithering
Returns
----------
image_scattered : `np.array`, (N, N)
image with the scattered light added
Notes
----------
Assumes that one physical pixel is 15 microns
so that effective size of the pixels is 15 / dithering
"""
size_of_pixels_in_image = (15 / self.dithering) / oversampling
# size of the created optical PSF images in microns
size_of_image_in_Microns = size_of_pixels_in_image * \
(image.shape[0])
# create grid to apply scattered light
pointsx = np.linspace(-(size_of_image_in_Microns - size_of_pixels_in_image) / 2,
(size_of_image_in_Microns - size_of_pixels_in_image) / 2,
num=image.shape[0],
dtype=np.float32)
pointsy = np.linspace(-(size_of_image_in_Microns - size_of_pixels_in_image) / 2,
(size_of_image_in_Microns - size_of_pixels_in_image) / 2,
num=image.shape[0]).astype(np.float32)
xs, ys = np.meshgrid(pointsx, pointsy)
r0 = np.sqrt((xs - 0) ** 2 + (ys - 0) ** 2) + .01
# creating scattered light
scattered_light_kernel = (r0**(-scattering_slope))
scattered_light_kernel[r0 < 7.5] = 7.5**(-scattering_slope)
scattered_light_kernel[scattered_light_kernel == np.inf] = 0
scattered_light_kernel = scattered_light_kernel * \
(scattering_amplitude) / (10 * np.max(scattered_light_kernel))
# convolve the psf with the scattered light kernel to create scattered light component
scattered_light = signal.fftconvolve(image, scattered_light_kernel, mode='same')
# add back the scattering to the image
image_scattered = image + scattered_light
return image_scattered
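# A standalone sketch of the scattered-light kernel constructed above
# (illustrative values, using the initParams defaults): a power law in
# radius, clipped to a plateau inside 7.5 microns so the kernel stays
# finite at the center.
# >>> r = np.linspace(0.01, 100, 1000)       # radius in microns
# >>> slope, amplitude = 2.0, 10**-2
# >>> kernel = r**(-slope)
# >>> kernel[r < 7.5] = 7.5**(-slope)
# >>> kernel = kernel * amplitude / (10 * np.max(kernel))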
def convolve_with_fiber(self, image, oversampling, fiber_r, dithering):
"""Convolve optical psf with a fiber
Parameters
----------
image : `np.array`, (N, N)
input image
oversampling: `int`
how oversampled is `image`
fiber_r: `float`
radius of the fiber in pixel units
dithering: `int`
dithering
Returns
----------
image_fiber_convolved : `np.array`, (N, N)
image convolved with the fiber image
Notes
----------
"""
fiber = Tophat2DKernel(oversampling * fiber_r * dithering,
mode='oversample').array
# create array with zeros with size of the current image, which we will
# fill with fiber array in the middle
fiber_padded = np.zeros_like(image, dtype=np.float32)
mid_point_of_image = int(image.shape[0] / 2)
fiber_array_size = fiber.shape[0]
# fill the zeroes image with fiber here
fiber_padded[int(mid_point_of_image - fiber_array_size / 2) + 1:
int(mid_point_of_image + fiber_array_size / 2) + 1,
int(mid_point_of_image - fiber_array_size / 2) + 1:
int(mid_point_of_image + fiber_array_size / 2) + 1] = fiber
# convolve with the fiber
image_fiber_convolved = signal.fftconvolve(image, fiber_padded, mode='same')
return image_fiber_convolved
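# A standalone sketch of the fiber kernel used above (illustrative values:
# oversampling=11, fiber_r=1.8 from the initParams defaults, dithering=1);
# astropy normalizes the kernel so its elements sum to 1.
# >>> from astropy.convolution import Tophat2DKernel
# >>> fiber = Tophat2DKernel(11 * 1.8 * 1, mode='oversample').array
# >>> float(np.sum(fiber))    # approximately 1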
def convolve_with_CCD_diffusion(self, image, oversampling, pixel_effect, dithering):
"""Convolve optical psf with a ccd diffusion effect
Parameters
----------
image : `np.array`, (N, N)
input image
oversampling: `int`
how oversampled is `image`
pixel_effect: `float`
sigma of gaussian kernel convolving image
dithering: `int`
dithering
Returns
----------
image_pixel_response_convolved : `np.array`, (N, N)
image convolved with the ccd diffusion kernel
Notes
----------
Pixels are not perfect detectors
Charge diffusion in our optical CCDs can be well described with a Gaussian
sigma that is around 7 microns (<NAME> - private communication).
This is controlled in our code by the parameter 'pixel_effect'
"""
pixel_gauss = Gaussian2DKernel(oversampling * pixel_effect * dithering).array.astype(np.float32)
pixel_gauss_padded = np.pad(pixel_gauss, int((len(image) - len(pixel_gauss)) / 2),
'constant', constant_values=0)
# assert that gauss_padded array did not produce empty array
assert np.sum(pixel_gauss_padded) > 0
image_pixel_response_convolved = signal.fftconvolve(image, pixel_gauss_padded, mode='same')
return image_pixel_response_convolved
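# A standalone sketch of the diffusion kernel used above (illustrative
# values: oversampling=11, pixel_effect=0.35 from the initParams defaults,
# dithering=1):
# >>> from astropy.convolution import Gaussian2DKernel
# >>> pixel_gauss = Gaussian2DKernel(11 * 0.35 * 1).array.astype(np.float32)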
def convolve_with_grating(self, image, oversampling, wavelength, grating_lines, dithering):
"""Convolve optical psf with a grating effect
Parameters
----------
image : `np.array`, (N, N)
input image
oversampling: `int`
how oversampled is `image`
wavelength: `float`
central wavelength of the spot
grating_lines: `int`
effective number of grating lines in the spectrograph
dithering: `int`
dithering
Returns
----------
image_grating_convolved : `np.array`, (N, N)
image convolved with the grating effect
Notes
----------
This code assumes that 15 microns covers wavelength range of 0.07907 nm
(assuming that 4300 pixels in real detector uniformly covers 340 nm)
"""
grating_kernel = np.ones((image.shape[0], 1), dtype=np.float32)
for i in range(len(grating_kernel)):
grating_kernel[i] = Ifun16Ne((i - int(image.shape[0] / 2)) * 0.07907 * 10**-9
/ (dithering * oversampling) + wavelength * 10**-9,
wavelength * 10**-9, grating_lines)
grating_kernel = grating_kernel / np.sum(grating_kernel)
image_grating_convolved = signal.fftconvolve(image, grating_kernel, mode='same')
return image_grating_convolved
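# The 0.07907 nm per 15-micron pixel figure used above follows directly from
# the stated detector assumption:
# >>> 340 / 4300    # nm of wavelength coverage / pixels across the detector
# 0.07906976744186046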
def _get_Pupil(self):
"""Create an image of the pupil
Parameters
----------
params : `lmfit.Parameters` object or python dictionary
Parameters describing the pupil model
Returns
----------
pupil : `pupil`
Instance of class PFSPupilFactory
Notes
----------
Calls PFSPupilFactory class
"""
if self.verbosity == 1:
logging.info(' ')
logging.info('Entering _get_Pupil (function inside ZernikeFitterPFS)')
if self.verbosity == 1:
logging.info('Size of the pupil (npix): ' + str(self.npix))
Pupil_Image = PFSPupilFactory(
pupilSize=self.diam_sic,
npix=self.npix,
input_angle=np.pi / 2,
detFrac=self.params['detFrac'].value,
strutFrac=self.params['strutFrac'].value,
slitFrac=self.params['slitFrac'].value,
slitFrac_dy=self.params['slitFrac_dy'].value,
x_fiber=self.params['x_fiber'].value,
y_fiber=self.params['y_fiber'].value,
effective_ilum_radius=self.params['effective_ilum_radius'].value,
frd_sigma=self.params['frd_sigma'].value, # noqa: E
frd_lorentz_factor=self.params['frd_lorentz_factor'].value,
det_vert=self.params['det_vert'].value,
slitHolder_frac_dx=self.params['slitHolder_frac_dx'].value,
wide_0=self.params['wide_0'].value,
wide_23=self.params['wide_23'].value,
wide_43=self.params['wide_43'].value,
misalign=self.params['misalign'].value,
verbosity=self.verbosity)
point = [self.params['dxFocal'].value, self.params['dyFocal'].value] # noqa: E
pupil = Pupil_Image.getPupil(point)
if self.save == 1:
np.save(self.TESTING_PUPIL_IMAGES_FOLDER + 'pupil.illuminated',
pupil.illuminated.astype(np.float32))
if self.verbosity == 1:
logging.info('Finished with _get_Pupil')
return pupil
def _getOptPsf_naturalResolution(self, params, return_intermediate_images=False):
"""Returns optical PSF, given the initialized parameters
Parameters
----------
params : `lmfit.Parameters` object or python dictionary
Parameters describing the model
return_intermediate_images : `bool`
If True, return intermediate images created during the run
This is in order to help with debugging and inspect
the images created during the process
Returns
----------
(if not return_intermediate_images)
img_apod : `np.array`
Psf image, only optical components considered
(if return_intermediate_images)
# return the image, pupil, illumination applied to the pupil
img_apod : `np.array`
Psf image, only optical components considered
ilum : `np.array`
Image showing the illumination of the pupil
wf_grid_rot : `np.array`
Image showing the wavefront across the pupil
Notes
----------
called by constructModelImage_PFS_naturalResolution
"""
if self.verbosity == 1:
logging.info(' ')
logging.info('Entering _getOptPsf_naturalResolution')
################################################################################
# pupil and illumination of the pupil
################################################################################
time_start_single_1 = time.time()
if self.verbosity == 1:
logging.info('use_pupil_parameters: ' + str(self.use_pupil_parameters))
logging.info('pupil_parameters if you are explicitly passing use_pupil_parameters: '
+ str(self.pupil_parameters))
if self.use_pupil_parameters is None:
pupil_parameters = np.array([params['detFrac'],
params['strutFrac'],
params['dxFocal'],
params['dyFocal'],
params['slitFrac'],
params['slitFrac_dy'],
params['x_fiber'],
params['y_fiber'],
params['effective_ilum_radius'],
params['frd_sigma'],
params['frd_lorentz_factor'],
params['det_vert'],
params['slitHolder_frac_dx'],
params['wide_0'],
params['wide_23'],
params['wide_43'],
params['misalign']])
self.pupil_parameters = pupil_parameters
else:
pupil_parameters = np.array(self.pupil_parameters)
diam_sic = self.diam_sic
if self.verbosity == 1:
logging.info(['detFrac', 'strutFrac', 'dxFocal', 'dyFocal', 'slitFrac', 'slitFrac_dy'])
logging.info(['x_fiber', 'y_fiber', 'effective_ilum_radius', 'frd_sigma',
'frd_lorentz_factor', 'det_vert', 'slitHolder_frac_dx'])
logging.info(['wide_0', 'wide_23', 'wide_43', 'misalign'])
logging.info('set of pupil_parameters I. : ' + str([params['detFrac'], params['strutFrac'],
params['dxFocal'], params['dyFocal'],
params['slitFrac'], params['slitFrac_dy']]))
logging.info('set of pupil_parameters II. : ' + str([params['x_fiber'], params['y_fiber'],
params['effective_ilum_radius'],
params['frd_sigma'],
params['frd_lorentz_factor'],
params['det_vert'],
params['slitHolder_frac_dx']]))
logging.info('set of pupil_parameters III. : ' + str([params['wide_0'], params['wide_23'],
params['wide_43'], params['misalign']]))
time_start_single_2 = time.time()
# initialize the galsim.Aperture class
pupil = self._get_Pupil()
aper = galsim.Aperture(
diam=pupil.size,
pupil_plane_im=pupil.illuminated.astype(np.float32),
pupil_plane_scale=pupil.scale,
pupil_plane_size=None)
if self.verbosity == 1:
if self.pupilExplicit is None:
logging.info('Requested pupil size is (pupil.size) [m]: ' + str(pupil.size))
logging.info('One pixel has size of (pupil.scale) [m]: ' + str(pupil.scale))
logging.info('Requested pupil has so many pixels (pupil_plane_im): '
+ str(pupil.illuminated.astype(np.int16).shape))
else:
logging.info('Supplied pupil size is (diam_sic) [m]: ' + str(self.diam_sic))
logging.info('One pixel has size of (diam_sic/npix) [m]: ' + str(self.diam_sic / self.npix))
logging.info('Requested pupil has so many pixels (pupilExplicit): '
+ str(self.pupilExplicit.shape))
time_end_single_2 = time.time()
if self.verbosity == 1:
logging.info('Time for _get_Pupil function is ' + str(time_end_single_2 - time_start_single_2))
time_start_single_3 = time.time()
# create array with pixels=1 if the area is illuminated and 0 if it is obscured
ilum = np.array(aper.illuminated, dtype=np.float32)
assert np.sum(ilum) > 0, str(self.pupil_parameters)
# gives size of the illuminated image
lower_limit_of_ilum = int(ilum.shape[0] / 2 - self.npix / 2)
higher_limit_of_ilum = int(ilum.shape[0] / 2 + self.npix / 2)
if self.verbosity == 1:
logging.info('lower_limit_of_ilum: ' + str(lower_limit_of_ilum))
logging.info('higher_limit_of_ilum: ' + str(higher_limit_of_ilum))
if self.pupilExplicit is None:
ilum[lower_limit_of_ilum:higher_limit_of_ilum,
lower_limit_of_ilum:higher_limit_of_ilum] = ilum[lower_limit_of_ilum:higher_limit_of_ilum,
lower_limit_of_ilum:higher_limit_of_ilum] *\
pupil.illuminated
else:
ilum[lower_limit_of_ilum:higher_limit_of_ilum,
lower_limit_of_ilum:higher_limit_of_ilum] = ilum[lower_limit_of_ilum:higher_limit_of_ilum,
lower_limit_of_ilum:higher_limit_of_ilum] *\
self.pupilExplicit.astype(np.float32)
if self.verbosity == 1:
logging.info('Size after padding zeros to 2x size '
+ 'and extra padding to get size suitable for FFT: '
+ str(ilum.shape))
# maximum extent of pupil image in units of radius of the pupil, needed for next step
size_of_ilum_in_units_of_radius = ilum.shape[0] / self.npix
if self.verbosity == 1:
logging.info('size_of_ilum_in_units_of_radius: ' + str(size_of_ilum_in_units_of_radius))
# do not calculate the ``radiometric effect'' (difference between entrance and exit pupil)
# if the parameters are too small to make any difference
# if that is the case, just declare ``ilum_radiometric'' to be the same as ilum,
# i.e., the illumination of the exit pupil is the same as the illumination of the entrance pupil
if params['radiometricExponent'] < 0.01 or params['radiometricEffect'] < 0.01:
if self.verbosity == 1:
logging.info('skipping ``radiometric effect\'\' ')
ilum_radiometric = ilum
else:
if self.verbosity == 1:
logging.info('radiometric parameters are: ')
logging.info('x_ilum,y_ilum,radiometricEffect,radiometricExponent'
+ str([params['x_ilum'], params['y_ilum'],
params['radiometricEffect'], params['radiometricExponent']]))
# add the change of flux between the entrance and exit pupil
# end product is radiometricEffectArray
points = np.linspace(-size_of_ilum_in_units_of_radius,
size_of_ilum_in_units_of_radius, num=ilum.shape[0])
xs, ys = np.meshgrid(points, points)
_radius_coordinate = np.sqrt(
(xs - params['x_ilum'] * params['dxFocal']) ** 2
+ (ys - params['y_ilum'] * params['dyFocal']) ** 2)
# change in v_0.14
# illumination to which the radiometric effect has been applied, describing
# the difference between the entrance and exit pupil
radiometricEffectArray = (1 + params['radiometricEffect']
* _radius_coordinate**2)**(-params['radiometricExponent'])
ilum_radiometric = np.nan_to_num(radiometricEffectArray * ilum, 0)
# this is where you can introduce some apodization in the pupil image by using the lines below
# the apodization sigma is set so that in focus it is 0.75
# for larger images, scale according to the size of the input image which is to be FFT-ed
# 0.75 is an arbitrary number
apodization_sigma = ((len(ilum_radiometric)) / 1158)**0.875 * 0.75
# apodization_sigma=0.75
time_start_single_4 = time.time()
# old code where I applied a Gaussian to the whole ilum image
# ilum_radiometric_apodized = gaussian_filter(ilum_radiometric, sigma=apodization_sigma)
# cut out the central region, apply the Gaussian to it, and return it to the full-size image
# done to speed up the calculation
# noqa: E128 in order to keep informative names
ilum_radiometric_center_region =\
ilum_radiometric[(lower_limit_of_ilum - int(np.ceil(3 * apodization_sigma))):
(higher_limit_of_ilum + int(np.ceil(3 * apodization_sigma))),
(lower_limit_of_ilum - int(np.ceil(3 * apodization_sigma))):
(higher_limit_of_ilum + int(np.ceil(3 * apodization_sigma)))]
ilum_radiometric_center_region_apodized = gaussian_filter(
ilum_radiometric_center_region, sigma=apodization_sigma)
ilum_radiometric_apodized = np.copy(ilum_radiometric)
ilum_radiometric_apodized[(lower_limit_of_ilum - int(np.ceil(3 * apodization_sigma))):
(higher_limit_of_ilum + int(np.ceil(3 * apodization_sigma))),
(lower_limit_of_ilum - int(np.ceil(3 * apodization_sigma))):
(higher_limit_of_ilum + int(np.ceil(3 * apodization_sigma)))] =\
ilum_radiometric_center_region_apodized # noqa: E122
time_end_single_4 = time.time()
if self.verbosity == 1:
logging.info('Time to apodize the pupil: ' + str(time_end_single_4 - time_start_single_4))
logging.info('type(ilum_radiometric_apodized)' + str(type(ilum_radiometric_apodized[0][0])))
# set pixels with amplitude below 0.01 to 0 and the rest to 1, creating a boolean mask
r_ilum_pre = np.copy(ilum_radiometric_apodized)
r_ilum_pre[ilum_radiometric_apodized > 0.01] = 1
r_ilum_pre[ilum_radiometric_apodized < 0.01] = 0
ilum_radiometric_apodized_bool = r_ilum_pre.astype(bool)
# manual creation of aper.u and aper.v (mimicking steps which were automatically done in galsim)
# this gives position information about each point in the exit pupil so we can apply wavefront to it
# aperu_manual=[]
# for i in range(len(ilum_radiometric_apodized_bool)):
# aperu_manual.append(np.linspace(-diam_sic*(size_of_ilum_in_units_of_radius/2),
# diam_sic*(size_of_ilum_in_units_of_radius/2),len(ilum_radiometric_apodized_bool), endpoint=True))
single_line_aperu_manual = np.linspace(-diam_sic * (size_of_ilum_in_units_of_radius / 2), diam_sic * (
size_of_ilum_in_units_of_radius / 2), len(ilum_radiometric_apodized_bool), endpoint=True)
aperu_manual = np.tile(
single_line_aperu_manual,
len(single_line_aperu_manual)).reshape(
len(single_line_aperu_manual),
len(single_line_aperu_manual))
# full grid
# u_manual=np.array(aperu_manual)
u_manual = aperu_manual
v_manual = np.transpose(aperu_manual)
# select only parts of the grid that are actually illuminated
u = u_manual[ilum_radiometric_apodized_bool]
v = v_manual[ilum_radiometric_apodized_bool]
time_end_single_3 = time.time()
if self.verbosity == 1:
logging.info('Time for postprocessing pupil after _get_Pupil '
+ str(time_end_single_3 - time_start_single_3))
time_end_single_1 = time.time()
if self.verbosity == 1:
logging.info('Time for pupil and illumination calculation is '
+ str(time_end_single_1 - time_start_single_1))
################################################################################
# wavefront
################################################################################
# create wavefront across the exit pupil
time_start_single = time.time()
if self.verbosity == 1:
logging.info('')
logging.info('Starting creation of wavefront')
aberrations_init = [0.0, 0, 0.0, 0.0]
aberrations = aberrations_init
# list of aberrations where we set z4, z11, z22 etc...
# This is only for testing purposes to study behaviour of non-focus terms
aberrations_0 = list(np.copy(aberrations_init))
for i in range(4, self.zmax + 1):
aberrations.append(params['z{}'.format(i)])
if i in [4, 11, 22]:
aberrations_0.append(0)
else:
aberrations_0.append(params['z{}'.format(i)])
# if you have passed aberrations above Zernike 22, join them with the lower
# order aberrations here
if self.extraZernike is not None:
aberrations_extended = np.concatenate((aberrations, self.extraZernike), axis=0)
if self.verbosity == 1:
logging.info('diam_sic [m]: ' + str(diam_sic))
logging.info('aberrations: ' + str(aberrations))
logging.info('aberrations moved to z4=0: ' + str(aberrations_0))
logging.info('aberrations extra: ' + str(self.extraZernike))
logging.info('wavelength [nm]: ' + str(self.wavelength))
if self.extraZernike is None:
optics_screen = galsim.phase_screens.OpticalScreen(
diam=diam_sic, aberrations=aberrations, lam_0=self.wavelength)
if self.save == 1:
# only create a fake with aberrations 0 if we are going to save, i.e., if we
# are presenting the results
optics_screen_fake_0 = galsim.phase_screens.OpticalScreen(
diam=diam_sic, aberrations=aberrations_0, lam_0=self.wavelength)
else:
optics_screen = galsim.phase_screens.OpticalScreen(
diam=diam_sic, aberrations=aberrations_extended, lam_0=self.wavelength)
if self.save == 1:
# only create a fake with aberrations 0 if we are going to save, i.e., if we
# are presenting the results
optics_screen_fake_0 = galsim.phase_screens.OpticalScreen(
diam=diam_sic, aberrations=aberrations_0, lam_0=self.wavelength)
screens = galsim.PhaseScreenList(optics_screen)
if self.save == 1:
# only create a fake with aberrations 0 if we are going to save, i.e., if we are presenting the results
screens_fake_0 = galsim.PhaseScreenList(optics_screen_fake_0)
time_end_single = time.time()
################################################################################
# combining pupil illumination and wavefront
################################################################################
# apply wavefront to the array describing illumination
# logging.info(self.use_wf_grid)
if self.use_wf_grid is None:
wf = screens.wavefront(u, v, None, 0)
if self.save == 1:
wf_full = screens.wavefront(u_manual, v_manual, None, 0)
wf_grid = np.zeros_like(ilum_radiometric_apodized_bool, dtype=np.float32)
wf_grid[ilum_radiometric_apodized_bool] = (wf / self.wavelength)
wf_grid_rot = wf_grid
else:
# if you want to pass an explicit wavefront, it goes here
wf_grid = self.use_wf_grid
wf_grid_rot = wf_grid
if self.save == 1:
# only create fake images with aberrations set to 0 if we are going to save,
# i.e., if we are testing the results
if self.verbosity == 1:
logging.info('creating wf_full_fake_0')
wf_full_fake_0 = screens_fake_0.wavefront(u_manual, v_manual, None, 0)
# exponential of the wavefront
expwf_grid = np.zeros_like(ilum_radiometric_apodized_bool, dtype=np.complex64)
expwf_grid[ilum_radiometric_apodized_bool] =\
ilum_radiometric_apodized[ilum_radiometric_apodized_bool] *\
np.exp(2j * np.pi * wf_grid_rot[ilum_radiometric_apodized_bool])
if self.verbosity == 1:
logging.info('Time for wavefront and wavefront/pupil combining is '
+ str(time_end_single - time_start_single))
################################################################################
# execute the FFT
################################################################################
time_start_single = time.time()
ftexpwf = np.fft.fftshift(scipy.fftpack.fft2(np.fft.fftshift(expwf_grid)))
img_apod = np.abs(ftexpwf)**2
time_end_single = time.time()
if self.verbosity == 1:
logging.info('Time for FFT is ' + str(time_end_single - time_start_single))
######################################################################
# size in arcseconds of the image generated by the code
scale_ModelImage_PFS_naturalResolution = sky_scale(
size_of_ilum_in_units_of_radius * self.diam_sic, self.wavelength)
self.scale_ModelImage_PFS_naturalResolution = scale_ModelImage_PFS_naturalResolution
if self.save == 1:
if socket.gethostname() in ('IapetusUSA', 'tiger2-sumire.princeton.edu',
'pfsa-usr01-gb.subaru.nao.ac.jp',
'pfsa-usr02-gb.subaru.nao.ac.jp'):
np.save(self.TESTING_PUPIL_IMAGES_FOLDER + 'aperilluminated', aper.illuminated)
np.save(self.TESTING_PUPIL_IMAGES_FOLDER + 'ilum', ilum)
np.save(self.TESTING_PUPIL_IMAGES_FOLDER + 'ilum_radiometric', ilum_radiometric)
np.save(self.TESTING_PUPIL_IMAGES_FOLDER + 'ilum_radiometric_apodized',
ilum_radiometric_apodized)
np.save(self.TESTING_PUPIL_IMAGES_FOLDER + 'ilum_radiometric_apodized_bool',
ilum_radiometric_apodized_bool)
np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'u_manual', u_manual)
np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'v_manual', v_manual)
np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'u', u)
np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'v', v)
np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'wf_grid', wf_grid)
if self.use_wf_grid is None:
np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'wf_full', wf_full)
np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'wf_full_fake_0', wf_full_fake_0)
np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'expwf_grid', expwf_grid)
if self.verbosity == 1:
logging.info('Finished with _getOptPsf_naturalResolution')
logging.info(' ')
if not return_intermediate_images:
return img_apod
if return_intermediate_images:
return img_apod, ilum[lower_limit_of_ilum:higher_limit_of_ilum,
lower_limit_of_ilum:higher_limit_of_ilum], wf_grid_rot
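# The computation above is standard Fraunhofer propagation: the image is
# |FFT(P * exp(2*pi*i*W))|**2, where P is the apodized pupil illumination and W is
# the wavefront in units of the wavelength. A minimal self-contained sketch of the
# same idea (hypothetical toy example with a 64x64 unobscured circular pupil,
# not part of this module):
#
# import numpy as np
# n = 64
# y, x = np.mgrid[-1:1:n * 1j, -1:1:n * 1j]
# pupil = ((x**2 + y**2) <= 1.0).astype(float) # circular pupil illumination
# wf = 0.1 * (x**2 + y**2) * pupil # defocus-like wavefront, in waves
# field = pupil * np.exp(2j * np.pi * wf)
# psf = np.abs(np.fft.fftshift(np.fft.fft2(np.fft.fftshift(field))))**2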
class LN_PFS_multi_same_spot(object):
"""!Class to compute quality of the multiple donut images,
of the same spot taken at different defocuses
Calls class LN_PFS_single, for example:
model = LN_PFS_single(sci_image,var_image,pupil_parameters = pupil_parameters,
use_pupil_parameters=None,zmax=zmax,save=1)
def model_return(allparameters_proposal):
return model(allparameters_proposal,return_Image=True)
Called by class Tokovinin_multi
"""
def __init__(
self,
list_of_sci_images,
list_of_var_images,
list_of_mask_images=None,
wavelength=None,
dithering=None,
save=None,
verbosity=None,
pupil_parameters=None,
use_pupil_parameters=None,
use_optPSF=None,
list_of_wf_grid=None,
zmax=None,
extraZernike=None,
pupilExplicit=None,
simulation_00=None,
double_sources=None,
double_sources_positions_ratios=None,
npix=None,
list_of_defocuses=None,
fit_for_flux=True,
test_run=False,
list_of_psf_positions=None,
use_center_of_flux=False):
"""
@param list_of_sci_images list of science images, list of 2d array
@param list_of_var_images list of variance images, 2d arrays,
which are the same size as sci_image
@param list_of_mask_images list of mask images, 2d arrays,
which are the same size as sci_image
@param dithering dithering, 1=normal, 2=two times higher resolution,
3=not supported
@param save save intermediate result in the process
(set value at 1 for saving)
@param verbosity verbosity of the process
(set value at 1 for full output)
@param pupil_parameters
@param use_pupil_parameters
@param use_optPSF
@param zmax largest Zernike order used
(11 or 22, or larger than 22)
@param extraZernike array consisting of higher order zernike
(if using higher order than 22)
@param pupilExplicit
@param simulation_00 resulting image will be centered with optical center
in the center of the image
and not fitted according to the sci_image
@param double_sources 1 if there are other secondary sources in the image
@param double_sources_positions_ratios array with parameters describing relative position
and relative flux of the secondary source(s)
@param npix size of the pupil (1536 recommended)
@param list_of_defocuses list of defocuses at which images are taken
(float or string?)
@param fit_for_flux automatically fit for the best flux level
that minimizes the chi**2
@param test_run if True, skip the creation of the model and
return the science image - useful for quickly
testing the interaction of the module's outputs
in a broader setting
@param list_of_psf_positions gives the position of the opt_psf in each image
@param use_center_of_flux if True, estimate the position of the in-focus
image via its center of flux
"""
if verbosity is None:
verbosity = 0
if use_pupil_parameters is not None:
assert pupil_parameters is not None
# logging.info('double_sources in module: ' + str(double_sources))
# logging.info('double_sources_positions_ratios in module: ' + str(double_sources_positions_ratios))
# logging.info('list_of_psf_positions in LN_PFS_multi_same_spot '+str(list_of_psf_positions))
if double_sources is not None and bool(double_sources):
assert np.sum(np.abs(double_sources_positions_ratios)) > 0
if zmax is None:
zmax = 11
if zmax == 11:
self.columns = [
'z4',
'z5',
'z6',
'z7',
'z8',
'z9',
'z10',
'z11',
'detFrac',
'strutFrac',
'dxFocal',
'dyFocal',
'slitFrac',
'slitFrac_dy',
'wide_0',
'wide_23',
'wide_43',
'misalign',
'x_fiber',
'y_fiber',
'effective_ilum_radius',
'frd_sigma',
'frd_lorentz_factor',
'det_vert',
'slitHolder_frac_dx',
'grating_lines',
'scattering_slope',
'scattering_amplitude',
'pixel_effect',
'fiber_r',
'flux']
if zmax >= 22:
self.columns = [
'z4',
'z5',
'z6',
'z7',
'z8',
'z9',
'z10',
'z11',
'z12',
'z13',
'z14',
'z15',
'z16',
'z17',
'z18',
'z19',
'z20',
'z21',
'z22',
'detFrac',
'strutFrac',
'dxFocal',
'dyFocal',
'slitFrac',
'slitFrac_dy',
'wide_0',
'wide_23',
'wide_43',
'misalign',
'x_fiber',
'y_fiber',
'effective_ilum_radius',
'frd_sigma',
'frd_lorentz_factor',
'det_vert',
'slitHolder_frac_dx',
'grating_lines',
'scattering_slope',
'scattering_amplitude',
'pixel_effect',
'fiber_r',
'flux']
self.list_of_sci_images = list_of_sci_images
self.list_of_var_images = list_of_var_images
if list_of_mask_images is None:
list_of_mask_images = []
for i in range(len(list_of_sci_images)):
mask_image = np.zeros(list_of_sci_images[i].shape)
list_of_mask_images.append(mask_image)
self.list_of_mask_images = list_of_mask_images
# self.mask_image=mask_image
# self.sci_image=sci_image
# self.var_image=var_image
self.wavelength = wavelength
self.dithering = dithering
self.save = save
self.pupil_parameters = pupil_parameters
self.use_pupil_parameters = use_pupil_parameters
self.use_optPSF = use_optPSF
self.pupilExplicit = pupilExplicit
self.simulation_00 = simulation_00
self.zmax = zmax
self.extraZernike = extraZernike
self.verbosity = verbosity
self.double_sources = double_sources
self.double_sources_positions_ratios = double_sources_positions_ratios
self.npix = npix
self.fit_for_flux = fit_for_flux
self.list_of_defocuses = list_of_defocuses
self.test_run = test_run
if list_of_psf_positions is None:
list_of_psf_positions = [None] * len(list_of_sci_images)
self.list_of_psf_positions = list_of_psf_positions
if list_of_wf_grid is None:
list_of_wf_grid = [None] * len(list_of_sci_images)
self.list_of_wf_grid = list_of_wf_grid
# self.use_only_chi=use_only_chi
self.use_center_of_flux = use_center_of_flux
def move_parametrizations_from_1d_to_2d(self, allparameters_parametrizations_1d, zmax=None):
"""Reshape the parametrization from 1d array to 2d array
Parameters
----------
allparameters_parametrizations_1d : `np.array`
Parametrization to be reshaped
zmax : `int`
Highest order of Zernike parameters applied
Returns
----------
allparameters_parametrizations_2d : `np.array`
Parametrization in 2d form
"""
# the zmax=22 parametrization has length 61 (19 * 2 Zernike + 23 global parameters)
if zmax is None:
zmax = int((len(allparameters_parametrizations_1d) - 61) / 2 + 22)
assert len(allparameters_parametrizations_1d.shape) == 1
z_parametrizations = allparameters_parametrizations_1d[:19 * 2].reshape(19, 2)
g_parametrizations =\
np.transpose(np.vstack((np.zeros(len(allparameters_parametrizations_1d[19 * 2:19 * 2 + 23])),
allparameters_parametrizations_1d[19 * 2:19 * 2 + 23])))
if zmax > 22:
extra_Zernike_parameters_number = zmax - 22
z_extra_parametrizations = allparameters_parametrizations_1d[19 * 2 + 23:].reshape(
extra_Zernike_parameters_number, 2)
if zmax <= 22:
allparameters_parametrizations_2d = np.vstack((z_parametrizations, g_parametrizations))
if zmax > 22:
allparameters_parametrizations_2d = np.vstack(
(z_parametrizations, g_parametrizations, z_extra_parametrizations))
# logging.info('allparameters_parametrizations_2d[41]: '+ str(allparameters_parametrizations_2d[41]))
# assert allparameters_parametrizations_2d[41][1] >= 0.98
# assert allparameters_parametrizations_2d[41][1] <= 1.02
return allparameters_parametrizations_2d
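# A minimal shape sketch for the method above (hypothetical input, zmax=22, assuming
# ``model_multi`` is an instance of this class): the 1d parametrization has
# 19 * 2 + 23 = 61 entries; the 19 Zernike (slope, offset) pairs become rows 0-18 and
# the 23 global parameters become rows 19-41 with a zero slope column:
#
# params_1d = np.zeros(61)
# params_2d = model_multi.move_parametrizations_from_1d_to_2d(params_1d)
# assert params_2d.shape == (42, 2)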
def create_list_of_allparameters(self, allparameters_parametrizations, list_of_defocuses=None, zmax=None):
"""Create list of parameters at given defocuses
Given the parametrizations (in either 1d or 2d ),
create list_of_allparameters to be used in analysis of single images
Parameters
----------
allparameters_parametrizations : `np.array`
Input parametrizations
list_of_defocuses : `list`
List containing the strings of defocuses at which we are searching for parameters
zmax : `int`
Highest order of Zernike parameters applied
Returns
----------
list_of_allparameters : `list`
List containing the parameters for each defocus position
"""
# logging.info('allparameters_parametrizations '+str(allparameters_parametrizations))
if zmax is None:
zmax = self.zmax
# if you have passed parameterization in 1d, move to 2d
# logging.info("allparameters_parametrizations.type: "+str(type(allparameters_parametrizations)))
# logging.info("allparameters_parametrizations.len: "+str(+len(allparameters_parametrizations)))
# logging.info("allparameters_parametrizations.shape: "+str(allparameters_parametrizations.shape))
if len(allparameters_parametrizations.shape) == 1:
allparameters_parametrizations = self.move_parametrizations_from_1d_to_2d(
allparameters_parametrizations)
list_of_allparameters = []
# if this is only a single image, just return the input
if list_of_defocuses is None:
return allparameters_parametrizations
else:
list_of_defocuses_int = self.transform_list_of_defocuses_from_str_to_float(list_of_defocuses)
# logging.info(list_of_defocuses_int)
# go through the list of defocuses, and create the allparameters array for each defocus
for i in range(len(list_of_defocuses)):
list_of_allparameters.append(
self.create_allparameters_single(
list_of_defocuses_int[i],
allparameters_parametrizations,
zmax))
# logging.info(list_of_allparameters)
return list_of_allparameters
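# A minimal usage sketch (hypothetical inputs, assuming ``model_multi`` is an
# instance of this class and ``params_2d`` is a 2d parametrization as above):
#
# list_of_allparameters = model_multi.create_list_of_allparameters(
# params_2d, list_of_defocuses=['m4', '0', 'p4'], zmax=22)
# # -> a list of three single-image parameter arrays, one per defocus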
def value_at_defocus(self, mm, a, b=None):
"""Calculate linear fit to a value at a given defocus (in mm)
Parameters
----------
mm : `float`
Slit defocus in mm
a : `float`
Linear parameter
b : `float`
Constant offset
Returns
----------
: `float`
Result of linear fit
"""
if b is None:
return a
else:
return a * mm + b
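# A minimal usage sketch (hypothetical numbers, assuming ``model_multi`` is an
# instance of this class): with a (slope, offset) pair the value at defocus mm
# is a * mm + b; with b omitted the parameter is treated as constant in defocus:
#
# model_multi.value_at_defocus(-4.0, a=0.25, b=1.5) # returns 0.5
# model_multi.value_at_defocus(-4.0, a=1.5) # returns 1.5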
def create_allparameters_single(self, mm, array_of_polyfit_1_parameterizations, zmax=None):
""" Given the defous, transform parametrization into parameters for that defocus
This function ransforms 1d array of ``parametrizations'' into ``parameters''m i.e.,
into form acceptable for creating single images.
This is a workhorse function used by function create_list_of_allparameters
Parameters
----------
mm : `float`
defocus of the slit
array_of_polyfit_1_parameterizations : `np.array`
parametrization for linear fit for the parameters as a function of focus
zmax : `int`
Highest order of Zernike parameters applied
Returns
----------
allparameters_proposal_single : `np.array`
Parameters that can be used to create single image
"""
if zmax is None:
# if the parametrization has 42 rows, zmax is 22
zmax = array_of_polyfit_1_parameterizations.shape[0] - 42 + 22
# number of Zernike orders above z22 (zero or negative when zmax <= 22)
extra_Zernike_parameters_number = zmax - 22
# for single case, up to z11
if zmax == 11:
z_parametrizations = array_of_polyfit_1_parameterizations[:8]
g_parametrizations = array_of_polyfit_1_parameterizations[8:]
allparameters_proposal_single = np.zeros((8 + len(g_parametrizations)))
for i in range(0, 8, 1):
allparameters_proposal_single[i] = self.value_at_defocus(
mm, z_parametrizations[i][0], z_parametrizations[i][1])
for i in range(len(g_parametrizations)):
allparameters_proposal_single[i + 8] = g_parametrizations[i][1]
if zmax >= 22:
z_parametrizations = array_of_polyfit_1_parameterizations[:19]
g_parametrizations = array_of_polyfit_1_parameterizations[19:19 + 23]
if extra_Zernike_parameters_number > 0:
z_extra_parametrizations = array_of_polyfit_1_parameterizations[42:]
allparameters_proposal_single = np.zeros(
(19 + len(g_parametrizations) + extra_Zernike_parameters_number))
for i in range(0, 19, 1):
# logging.info(str([i,mm,z_parametrizations[i]]))
allparameters_proposal_single[i] = self.value_at_defocus(
mm, z_parametrizations[i][0], z_parametrizations[i][1])
for i in range(len(g_parametrizations)):
allparameters_proposal_single[19 + i] = g_parametrizations[i][1]
for i in range(0, extra_Zernike_parameters_number, 1):
# logging.info(str([i,mm,z_parametrizations[i]]))
allparameters_proposal_single[19 + len(g_parametrizations) + i] = self.value_at_defocus(
mm, z_extra_parametrizations[i][0], z_extra_parametrizations[i][1])
return allparameters_proposal_single
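# Layout of the returned array for zmax >= 22, as built above: entries 0-18 are
# z4..z22 evaluated at defocus mm, entries 19-41 are the 23 global parameters taken
# as constants (their offset column), followed by any extra Zernike orders evaluated
# at mm. A hypothetical call, reusing ``params_2d`` from the sketch above:
#
# allparameters_m4 = model_multi.create_allparameters_single(-4.0, params_2d, zmax=22)
# assert len(allparameters_m4) == 19 + 23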
def transform_list_of_defocuses_from_str_to_float(self, list_of_defocuses):
"""Transfroms list_of_defocuses from strings to float values
Parameters
----------
list_of_defocuses : `list`
list of defocuses in string form (e.g., [m4,m25,0,p15,p4])
Returns
----------
list_of_defocuses_float : `list`
list of defocuses in float form
"""
list_of_defocuses_float = []
for i in range(len(list_of_defocuses)):
if list_of_defocuses[i][0] == '0':
list_of_defocuses_float.append(0)
else:
if list_of_defocuses[i][0] == 'm':
sign = -1
if list_of_defocuses[i][0] == 'p':
sign = +1
if len(list_of_defocuses[i]) == 2:
list_of_defocuses_float.append(sign * float(list_of_defocuses[i][1:]))
else:
list_of_defocuses_float.append(sign * float(list_of_defocuses[i][1:]) / 10)
return list_of_defocuses_float
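# Worked example of the convention above ('m' is minus, 'p' is plus; two-character
# strings are whole mm, longer strings are tenths of mm), assuming ``model_multi``
# is an instance of this class:
#
# model_multi.transform_list_of_defocuses_from_str_to_float(['m4', 'm25', '0', 'p15', 'p4'])
# # -> [-4.0, -2.5, 0, 1.5, 4.0]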
def create_resonable_allparameters_parametrizations(
self,
array_of_allparameters,
list_of_defocuses_input,
zmax,
remove_last_n=None):
"""Create ``parametrizations'' from list of ``parameters'' and defocuses
Given parameters for single defocus images and their defocuses,
create parameterizations (1d functions) for multi-image linear fit across various defocuses
This is the inverse of function `create_list_of_allparameters`
Parameters
----------
array_of_allparameters : `np.array`
Array with parameters of defocus, 2d array with shape
[n(list_of_defocuses),number of parameters]
list_of_defocuses_input : `list`
List of strings at which defocuses are the data
from array_of_allparameters
zmax : `int`
Highest order of Zernike parameters applied
remove_last_n : `int`
Do not do the fit for the last 'n' parameters
If not specified, it defaults to 2
Returns
----------
array_of_polyfit_1_parameterizations : `np.array`
Array containing the output 1d ``parameterizations''
"""
if remove_last_n is None:
remove_last_n = 2
list_of_defocuses_int = self.transform_list_of_defocuses_from_str_to_float(list_of_defocuses_input)
if remove_last_n > 0:
array_of_allparameters = array_of_allparameters[:, :-remove_last_n]
if zmax <= 22:
len_of_iterations = array_of_allparameters.shape[1]
else:
len_of_iterations = 42 + zmax - 22
list_of_polyfit_1_parameter = []
for i in range(len_of_iterations):
# logging.info([i,array_of_allparameters.shape[1]])
if i < array_of_allparameters.shape[1]:
# logging.info('i'+str(i)+' '+str(array_of_allparameters[:,i]))
polyfit_1_parameter = np.polyfit(
x=list_of_defocuses_int, y=array_of_allparameters[:, i], deg=1)
else:
# logging.info('i'+str(i)+' '+'None')
# if there is no input for such a high Zernike order, set it to zero
polyfit_1_parameter = np.array([0, 0])
# logging.info('i_polyfit'+str(i)+' '+str(polyfit_1_parameter))
list_of_polyfit_1_parameter.append(polyfit_1_parameter)
array_of_polyfit_1_parameterizations = np.array(list_of_polyfit_1_parameter)
# list_of_defocuses_output_int=self.transform_list_of_defocuses_from_str_to_float(list_of_defocuses_input)
# list_of_allparameters=[]
# for i in list_of_defocuses_output_int:
# allparameters_proposal_single=self.create_allparameters_single(i,array_of_polyfit_1_parameterizations,zmax=self.zmax)
# list_of_allparameters.append(allparameters_proposal_single)
return array_of_polyfit_1_parameterizations
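# Sketch of the per-parameter fit performed above (hypothetical numbers): each
# parameter column is fit linearly against the defocuses, recovering a
# (slope, offset) pair that value_at_defocus can evaluate back:
#
# np.polyfit(x=[-4.0, 4.0], y=[0.9, 1.1], deg=1) # -> array([0.025, 1.0])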
def lnlike_Neven_multi_same_spot(self, list_of_allparameters_input, return_Images=False,
use_only_chi=False, multi_background_factor=3):
"""Create model images and estimate their quality
Creates model images, and compares them to supplied data
Parameters
----------
list_of_allparameters_input : `list`
List of parameters used to create the image at each defocus
return_Images : `bool`
If True, return all the created images and auxiliary data
use_only_chi : `bool`
If True, use chi as the quality measure
If False, use chi**2 as the quality measure
multi_background_factor : `int`
Only consider pixels with flux above this factor * background level
Returns
----------
(if return_Images is False):
mean_res_of_multi_same_spot : `float`
Mean quality of all images
(if return_Images is True):
mean_res_of_multi_same_spot [index 0] : `float`
Mean quality of all images
list_of_single_res [index 1] : `list`
Quality per image
list_of_single_model_image [index 2] : `list`
List of created model images
list_of_single_allparameters [index 3] : `list`
List of parameters per image
list_of_single_chi_results [index 4] : `list`
List of arrays describing quality of fitting
Each of these array contains
0. chi2_max value, 1. Qvalue, 2. (chi or chi2)/d.o.f., 3. (chi2 or chi2_max)/d.o.f.
array_of_psf_positions_output [index 5] : `np.array`
Array showing the centering of images
"""
self.use_only_chi = use_only_chi
list_of_single_res = []
if return_Images:
list_of_single_model_image = []
list_of_single_allparameters = []
list_of_single_chi_results = []
if len(self.list_of_sci_images) == len(list_of_allparameters_input):
list_of_allparameters = np.copy(list_of_allparameters_input)
else:
allparametrization = list_of_allparameters_input
# logging.info('self.list_of_defocuses: ' + str(self.list_of_defocuses))
# logging.info('allparametrization.type: ' + str(allparametrization.type))
list_of_allparameters = self.create_list_of_allparameters(
allparametrization, list_of_defocuses=self.list_of_defocuses)
if self.verbosity == 1:
logging.info('Starting LN_PFS_multi_same_spot for parameters-hash '
+ str(hash(str(allparametrization.data)))
+ ' at ' + str(time.time()) + ' in thread '
+ str(threading.get_ident()))
assert len(self.list_of_sci_images) == len(list_of_allparameters)
# logging.info(len(self.list_of_sci_images))
# logging.info(len(list_of_allparameters))
# use same weights, experiment
# if use_only_chi==True:
# renormalization_of_var_sum=np.ones((len(self.list_of_sci_images)))*len(self.list_of_sci_images)
# central_index=int(len(self.list_of_sci_images)/2)
# renormalization_of_var_sum[central_index]=1
# else:
# find the image with the lowest variance - presumably the one in focus
# array_of_var_sum=np.array(list(map(np.sum,self.list_of_var_images)))
# index_of_max_var_sum=np.where(array_of_var_sum==np.min(array_of_var_sum))[0][0]
# find what variance selects the top 20% of pixels
# this is done to weight more the images in focus and less the image out of focus in the
# final likelihood result
# quantile_08_focus=np.quantile(self.list_of_sci_images[index_of_max_var_sum],0.8)
list_of_var_sums = []
for i in range(len(list_of_allparameters)):
# taking from create_chi_2_almost function in LN_PFS_single
mask_image = self.list_of_mask_images[i]
var_image = self.list_of_var_images[i]
sci_image = self.list_of_sci_images[i]
# array that has True for values which are good and False for bad values
inverted_mask = ~mask_image.astype(bool)
try:
if sci_image.shape[0] == 20:
multi_background_factor = 3
# logging.info('var_image.shape: '+str(var_image.shape))
# logging.info('multi_background_factor: '+str(multi_background_factor))
# logging.info('np.median(var_image[0]): '+str(np.median(var_image[0])))
# logging.info('np.median(var_image[-1]): '+str(np.median(var_image[-1])))
# logging.info('np.median(var_image[:,0]): '+str(np.median(var_image[:,0])))
# logging.info('np.median(var_image[:,-1]): '+str(np.median(var_image[:,-1])))
mean_value_of_background_via_var = np.mean([np.median(var_image[0]), np.median(
var_image[-1]), np.median(var_image[:, 0]),
np.median(var_image[:, -1])]) * multi_background_factor
# logging.info('mean_value_of_background_via_var: '+str(mean_value_of_background_via_var))
mean_value_of_background_via_sci = np.mean([np.median(sci_image[0]), np.median(
sci_image[-1]), np.median(sci_image[:, 0]),
np.median(sci_image[:, -1])]) * multi_background_factor
# logging.info('mean_value_of_background_via_sci: '+str(mean_value_of_background_via_sci))
mean_value_of_background = np.max(
[mean_value_of_background_via_var, mean_value_of_background_via_sci])
except BaseException:
pass
# select only images with above 80% percentile of the image with max variance?
var_image_masked = var_image * inverted_mask
var_image_masked_without_nan = var_image_masked.ravel()[
var_image_masked.ravel() > mean_value_of_background]
if use_only_chi:
# if the background level is set too high
if len(var_image_masked_without_nan) == 0:
var_sum = -1
else:
# var_sum=-(1)*(np.sum(np.sqrt(np.abs(var_image_masked_without_nan))))
var_sum = -1
else:
# if the background level is set too high
if len(var_image_masked_without_nan) == 0:
var_sum = -(1)
else:
var_sum = -(1) * (np.mean(np.abs(var_image_masked_without_nan)))
list_of_var_sums.append(var_sum)
# renormalization needs to be reconsidered?
array_of_var_sum = np.array(list_of_var_sums)
max_of_array_of_var_sum = np.max(array_of_var_sum)
renormalization_of_var_sum = array_of_var_sum / max_of_array_of_var_sum
# logging.info('renormalization_of_var_sum'+str(renormalization_of_var_sum))
list_of_psf_positions_output = []
for i in range(len(list_of_allparameters)):
# if the image is in focus, which at this point is identified by an image size of 20 pixels
if (self.list_of_sci_images[i].shape)[0] == 20:
use_center_of_flux = bool(self.use_center_of_flux)
else:
use_center_of_flux = False
if self.verbosity == 1:
logging.info('################################')
logging.info('analyzing image ' + str(i + 1) + ' out of ' + str(len(list_of_allparameters)))
logging.info(' ')
# if this is the first image, do the full analysis, generate new pupil and illumination
if i == 0:
model_single = LN_PFS_single(
self.list_of_sci_images[i],
self.list_of_var_images[i],
self.list_of_mask_images[i],
wavelength=self.wavelength,
dithering=self.dithering,
save=self.save,
verbosity=self.verbosity,
pupil_parameters=self.pupil_parameters,
use_pupil_parameters=self.use_pupil_parameters,
use_optPSF=self.use_optPSF,
use_wf_grid=self.list_of_wf_grid[i],
zmax=self.zmax,
extraZernike=self.extraZernike,
pupilExplicit=self.pupilExplicit,
simulation_00=self.simulation_00,
double_sources=self.double_sources,
double_sources_positions_ratios=self.double_sources_positions_ratios,
npix=self.npix,
fit_for_flux=self.fit_for_flux,
test_run=self.test_run,
explicit_psf_position=self.list_of_psf_positions[i],
use_only_chi=self.use_only_chi,
use_center_of_flux=use_center_of_flux)
res_single_with_intermediate_images = model_single(
list_of_allparameters[i],
return_Image=True,
return_intermediate_images=True,
use_only_chi=use_only_chi,
multi_background_factor=multi_background_factor)
if res_single_with_intermediate_images == -np.inf:
return -np.inf
if isinstance(res_single_with_intermediate_images, tuple):
if res_single_with_intermediate_images[0] == -np.inf:
return -np.inf
likelihood_result = res_single_with_intermediate_images[0]
model_image = res_single_with_intermediate_images[1]
allparameters = res_single_with_intermediate_images[2]
pupil_explicit_0 = res_single_with_intermediate_images[3]
# wf_grid_rot = res_single_with_intermediate_images[4]
chi_results = res_single_with_intermediate_images[5]
psf_position = res_single_with_intermediate_images[6]
list_of_single_res.append(likelihood_result)
list_of_psf_positions_output.append(psf_position)
if return_Images:
list_of_single_model_image.append(model_image)
list_of_single_allparameters.append(allparameters)
list_of_single_chi_results.append(chi_results)
# and if this is not the first image, use the pupil and illumination used in the first image
else:
model_single = LN_PFS_single(
self.list_of_sci_images[i],
self.list_of_var_images[i],
self.list_of_mask_images[i],
wavelength=self.wavelength,
dithering=self.dithering,
save=self.save,
verbosity=self.verbosity,
pupil_parameters=self.pupil_parameters,
use_pupil_parameters=self.use_pupil_parameters,
use_optPSF=self.use_optPSF,
use_wf_grid=self.list_of_wf_grid[i],
zmax=self.zmax,
extraZernike=self.extraZernike,
pupilExplicit=pupil_explicit_0,
simulation_00=self.simulation_00,
double_sources=self.double_sources,
double_sources_positions_ratios=self.double_sources_positions_ratios,
npix=self.npix,
fit_for_flux=self.fit_for_flux,
test_run=self.test_run,
explicit_psf_position=self.list_of_psf_positions[i],
use_only_chi=self.use_only_chi,
use_center_of_flux=use_center_of_flux)
if not return_Images:
res_single_without_intermediate_images = model_single(
list_of_allparameters[i],
return_Image=return_Images,
use_only_chi=use_only_chi,
multi_background_factor=multi_background_factor)
likelihood_result = res_single_without_intermediate_images[0]
# take the psf position from this image's result (not from the stale
# res_single_with_intermediate_images belonging to the first image)
psf_position = res_single_without_intermediate_images[-1]
# logging.info(likelihood_result)
list_of_single_res.append(likelihood_result)
list_of_psf_positions_output.append(psf_position)
if return_Images:
res_single_with_an_image = model_single(
list_of_allparameters[i], return_Image=return_Images, use_only_chi=use_only_chi)
if res_single_with_an_image == -np.inf:
return -np.inf
likelihood_result = res_single_with_an_image[0]
model_image = res_single_with_an_image[1]
allparameters = res_single_with_an_image[2]
chi_results = res_single_with_an_image[3]
psf_position = res_single_with_an_image[-1]
list_of_single_res.append(likelihood_result)
list_of_single_model_image.append(model_image)
list_of_single_allparameters.append(allparameters)
list_of_single_chi_results.append(chi_results)
list_of_psf_positions_output.append(psf_position)
# possibly implement intermediate images here
array_of_single_res = np.array(list_of_single_res)
array_of_psf_positions_output = np.array(list_of_psf_positions_output)
# renormalization
if self.verbosity == 1:
logging.info('################################')
logging.info('Likelihoods returned per individual images are: ' + str(array_of_single_res))
logging.info('Mean likelihood is ' + str(np.mean(array_of_single_res)))
# mean_res_of_multi_same_spot=np.mean(array_of_single_res)
mean_res_of_multi_same_spot = np.mean(array_of_single_res / renormalization_of_var_sum)
if self.verbosity == 1:
logging.info('################################')
logging.info('Renormalized likelihoods returned per individual images are: '
+ str(array_of_single_res / renormalization_of_var_sum))
logging.info('Renormalization factors are: ' + str(renormalization_of_var_sum))
logging.info('Mean renormalized likelihood is ' + str(mean_res_of_multi_same_spot))
logging.info('array_of_psf_positions_output: ' + str(array_of_psf_positions_output))
if self.verbosity == 1:
# logging.info('Ending LN_PFS_multi_same_spot for parameters-hash '+
# str(hash(str(allparametrization.data)))+' at '+str(time.time())+
# ' in thread '+str(threading.get_ident()))
logging.info('Ending LN_PFS_multi_same_spot at time '
+ str(time.time()) + ' in thread ' + str(threading.get_ident()))
logging.info(' ')
if not return_Images:
return mean_res_of_multi_same_spot
if return_Images:
# 0. mean_res_of_multi_same_spot - mean likelihood per images, renormalized
# 1. list_of_single_res - likelihood per image, not renormalized
# 2. list_of_single_model_image - list of created model images
# 3. list_of_single_allparameters - list of parameters per image?
# 4. list_of_single_chi_results - list of arrays describing quality of fitting
# 1. chi2_max value, 2. Qvalue, 3. chi2/d.o.f., 4. chi2_max/d.o.f.
# 5. array_of_psf_positions_output - list showing the centering of images
return mean_res_of_multi_same_spot, list_of_single_res, list_of_single_model_image,\
list_of_single_allparameters, list_of_single_chi_results, array_of_psf_positions_output
def __call__(
self,
list_of_allparameters,
return_Images=False,
use_only_chi=False,
multi_background_factor=3):
return self.lnlike_Neven_multi_same_spot(list_of_allparameters, return_Images=return_Images,
use_only_chi=use_only_chi,
multi_background_factor=multi_background_factor)
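# A minimal usage sketch for this class (hypothetical data; each sci/var image is
# a 2d np.array of the same size, taken at the defocuses named in
# list_of_defocuses; the wavelength value is illustrative):
#
# model_multi = LN_PFS_multi_same_spot([sci_m4, sci_0, sci_p4],
# [var_m4, var_0, var_p4],
# wavelength=794, zmax=22, verbosity=0,
# list_of_defocuses=['m4', '0', 'p4'])
# mean_likelihood = model_multi(allparameters_parametrizations_1d)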
class Tokovinin_multi(object):
"""
# improvements possible - modify by how much to move parameters based on the previous step
# in the simplified H, take the new model into account when doing changes
outputs:
initial_model_result,final_model_result,\
list_of_initial_model_result,list_of_final_model_result,\
out_images, pre_images, list_of_image_final,\
allparameters_parametrization_proposal, allparameters_parametrization_proposal_after_iteration,\
list_of_initial_input_parameters, list_of_finalinput_parameters,\
list_of_pre_chi2,list_of_after_chi2,\
list_of_psf_positions,list_of_final_psf_positions,\
[uber_images_normalized,uber_M0_std,H_std,array_of_delta_z_parametrizations_None,list_of_final_psf_positions]
explanation of the results:
0. likelihood averaged over all images (before the function)
1. likelihood averaged over all images (after the function)
2. likelihood per image (output from model_multi) (before the function)
3. likelihood per image (output from model_multi) (after the function)
4. out_images
5. list of initial model images
6. list of final model images
7. parametrization before the function
8. parametrization after the function
9. list of parameters per image (before the function)
10. list of parameters per image (after the function)
11. list of chi2 per image (before the function)
12. list of chi2 per image (after the function)
13. list of psf position of image (before the function)
14. list of psf position of image (after the function)
15. [uber_images_normalized,uber_M0,H,array_of_delta_z_parametrizations_None,list_of_final_psf_positions]
15.0. uber_images_normalized
15.1. uber_M0
15.2. H
15.3. array_of_delta_z_parametrizations_None
15.4. list_of_final_psf_positions
"""
def __init__(self, list_of_sci_images, list_of_var_images, list_of_mask_images=None,
wavelength=None, dithering=None, save=None, verbosity=None,
pupil_parameters=None, use_pupil_parameters=None, use_optPSF=None, list_of_wf_grid=None,
zmax=None, extraZernike=None, pupilExplicit=None, simulation_00=None,
double_sources=None, double_sources_positions_ratios=None, npix=None,
list_of_defocuses=None, fit_for_flux=True, test_run=False, list_of_psf_positions=None,
num_iter=None, move_allparameters=None, pool=None):
"""
@param list_of_sci_images list of science images, list of 2d array
@param list_of_var_images list of variance images, 2d arrays,
which are the same size as sci_image
@param list_of_mask_images list of mask images, 2d arrays,
which are the same size as sci_image
@param wavelength wavelength in nm, to be passed to the module
@param dithering dithering, 1=normal, 2=two times higher resolution,
3=not supported
@param save save intermediate result in the process
(set value at 1 for saving)
@param verbosity verbosity of the process
(set value at 2 for full output,
1 only in Tokovinin, 0==nothing)
@param pupil_parameters
@param use_pupil_parameters
@param use_optPSF
@param zmax largest Zernike order used (11 or 22, or larger than 22)
@param extraZernike array consisting of higher order zernike
(if using higher order than 22)
@param pupilExplicit
@param simulation_00 resulting image will be centered with optical center
in the center of the image
and not fitted according to the sci_image
@param double_sources 1 if there are other secondary sources in the image
@param double_sources_positions_ratios array with parameters describing relative position
and relative flux of the secondary source(s)
@param npix size of the pupil (1536 recommended)
@param list_of_defocuses list of defocuses at which images are taken
(float or string?)
@param fit_for_flux automatically fit for the best flux level
that minimizes the chi**2
@param test_run if True, skip the creation of the model and
return the science image - useful for quickly
testing the interaction of the module's outputs
in a broader setting
@param list_of_psf_positions gives position of the opt_psf
@param num_iter number of iterations
@param move_allparameters if True change all parameters i.e.,
also ``global'' parameters, i.e.,
not just wavefront parameters
@param pool pass a pool of workers to calculate
the array of changes due to wavefront movements
"""
if verbosity is None:
verbosity = 0
if use_pupil_parameters is not None:
assert pupil_parameters is not None
if double_sources is not None and double_sources is not False:
assert np.sum(np.abs(double_sources_positions_ratios)) > 0
if zmax is None:
zmax = 22
if zmax == 11:
self.columns = [
'z4',
'z5',
'z6',
'z7',
'z8',
'z9',
'z10',
'z11',
'detFrac',
'strutFrac',
'dxFocal',
'dyFocal',
'slitFrac',
'slitFrac_dy',
'wide_0',
'wide_23',
'wide_43',
'misalign',
'x_fiber',
'y_fiber',
'effective_ilum_radius',
'frd_sigma',
'frd_lorentz_factor',
'det_vert',
'slitHolder_frac_dx',
'grating_lines',
'scattering_slope',
'scattering_amplitude',
'pixel_effect',
'fiber_r',
'flux']
if zmax >= 22:
self.columns = [
'z4',
'z5',
'z6',
'z7',
'z8',
'z9',
'z10',
'z11',
'z12',
'z13',
'z14',
'z15',
'z16',
'z17',
'z18',
'z19',
'z20',
'z21',
'z22',
'detFrac',
'strutFrac',
'dxFocal',
'dyFocal',
'slitFrac',
'slitFrac_dy',
'wide_0',
'wide_23',
'wide_43',
'misalign',
'x_fiber',
'y_fiber',
'effective_ilum_radius',
'frd_sigma',
'frd_lorentz_factor',
'det_vert',
'slitHolder_frac_dx',
'grating_lines',
'scattering_slope',
'scattering_amplitude',
'pixel_effect',
'fiber_r',
'flux']
self.list_of_sci_images = list_of_sci_images
self.list_of_var_images = list_of_var_images
if list_of_mask_images is None:
list_of_mask_images = []
for i in range(len(list_of_sci_images)):
mask_image = np.zeros(list_of_sci_images[i].shape)
list_of_mask_images.append(mask_image)
self.list_of_mask_images = list_of_mask_images
# implement custom variance image here
# self.mask_image=mask_image
# self.sci_image=sci_image
# self.var_image=var_image
self.wavelength = wavelength
self.dithering = dithering
self.save = save
self.pupil_parameters = pupil_parameters
self.use_pupil_parameters = use_pupil_parameters
self.use_optPSF = use_optPSF
self.pupilExplicit = pupilExplicit
self.simulation_00 = simulation_00
self.zmax = zmax
self.extraZernike = extraZernike
self.verbosity = verbosity
self.double_sources = double_sources
self.double_sources_positions_ratios = double_sources_positions_ratios
self.npix = npix
self.fit_for_flux = fit_for_flux
self.list_of_defocuses = list_of_defocuses
self.test_run = test_run
if list_of_psf_positions is None:
list_of_psf_positions = [None] * len(list_of_sci_images)
self.list_of_psf_positions = list_of_psf_positions
if list_of_wf_grid is None:
list_of_wf_grid = [None] * len(list_of_sci_images)
self.list_of_wf_grid = list_of_wf_grid
self.list_of_defocuses = list_of_defocuses
self.move_allparameters = move_allparameters
self.num_iter = num_iter
self.pool = pool
if self.verbosity >= 1:
self.verbosity_model = self.verbosity - 1
else:
self.verbosity_model = self.verbosity
# parameter that controls if the intermediate outputs are saved to the hard disk
# (note that this overrides the ``save`` argument passed above)
save = False
self.save = save
def Tokovinin_algorithm_chi_multi(self, allparameters_parametrization_proposal,
return_Images=False, num_iter=None, previous_best_result=None,
use_only_chi=False, multi_background_factor=3, up_to_which_z=None):
""" Apply Tokovinin algorithm to a set of images
Parameters
----------
allparameters_parametrization_proposal : `np.array`
2d parametrization of variables
return_Images : `bool`
if True, also return created images
num_iter : `int`
number of iteration, used when creating save files
previous_best_result : `np.array`, optional
output from previous Tokovinin run
use_only_chi : `bool`
if True, optimize using chi, not chi**2
multi_background_factor : `float`
take into account only pixels with flux this many times above the background
up_to_which_z : `int` or `False`, optional
if False, skip the iterative procedure entirely and return the premodel results
Returns
----------
(if return_Images == False)
final_model_result : `float`
averaged ``likelihood'' over all images
(if return_Images == True AND previous_best_result is None)
initial_model_result : `float`
likelihood averaged over all images (before the iteration)
final_model_result : `float`
output with index 0 from model_multi - averaged ``likelihood'' over all input images;
if the proposed images have worse quality than the input, reproduce the input value
list_of_initial_model_result : `list`
likelihood per image (before the iteration)
list_of_final_model_result : `list`
likelihood per image (after the iteration)
allparameters_parametrization_proposal : `np.array`
parametrization before the iteration
allparameters_parametrization_proposal_after_iteration : `np.array`
parametrization after the iteration
list_of_initial_input_parameters : `list`
list of parameters per image (before the iteration)
list_of_finalinput_parameters : `list`
list of parameters per image (after the iteration)
list_of_pre_chi2 : `list`
list of chi2 per image (before the iteration)
list_of_after_chi2 : `list`
list of chi2 per image (after the iteration)
list_of_psf_positions : `list`
list of psf positions per image (before the iteration)
list_of_final_psf_positions : `list`
list of psf positions per image (after the iteration)
[uber_images_normalized, uber_M0_std, H_std,
array_of_delta_z_parametrizations_None, list_of_final_psf_positions]:
auxiliary outputs, used to seed the next Tokovinin run via previous_best_result
(if return_Images == True AND previous_best_result is available)
"""
if self.verbosity >= 1:
logging.info('###############################################################################')
logging.info('###############################################################################')
logging.info('Starting Tokovinin_algorithm_chi_multi with num_iter: ' + str(num_iter))
logging.info('Tokovinin, return_Images: ' + str(return_Images))
logging.info('Tokovinin, num_iter: ' + str(num_iter))
logging.info('Tokovinin, use_only_chi: ' + str(use_only_chi))
logging.info('Tokovinin, multi_background_factor: ' + str(multi_background_factor))
logging.info('allparameters_parametrization_proposal'
+ str(allparameters_parametrization_proposal))
logging.info('allparameters_parametrization_proposal.shape'
+ str(allparameters_parametrization_proposal.shape))
list_of_sci_images = self.list_of_sci_images
list_of_var_images = self.list_of_var_images
list_of_mask_images = self.list_of_mask_images
double_sources_positions_ratios = self.double_sources_positions_ratios
list_of_defocuses_input_long = self.list_of_defocuses
if num_iter is None:
if self.num_iter is not None:
num_iter = self.num_iter
move_allparameters = self.move_allparameters
# if you passed previous best result, set the list_of_explicit_psf_positions
# by default it is put as the last element in the last cell in the previous_best_result output
if previous_best_result is not None:
# to be compatible with versions before 0.45
if len(previous_best_result) == 5:
self.list_of_psf_positions = previous_best_result[-1]
else:
self.list_of_psf_positions = previous_best_result[-1][-1]
##########################################################################
# Create initial modeling as basis for future effort
# the outputs of this section are 0. pre_model_result, 1. model_results, 2. pre_images,
# 3. pre_input_parameters, 4. chi_2_before_iteration_array, 5. list_of_psf_positions
if self.verbosity >= 1:
logging.info('list_of_defocuses analyzed: ' + str(list_of_defocuses_input_long))
# logging.info('list_of_sci_images'+str(list_of_sci_images))
# logging.info('list_of_var_images'+str(list_of_var_images))
# logging.info('list_of_mask_images'+str(list_of_mask_images))
# logging.info('wavelength'+str(self.wavelength))
# logging.info('dithering'+str(self.dithering))
# logging.info('self.save'+str(self.save))
# logging.info('self.zmax'+str(self.zmax))
# logging.info('self.double_sources'+str(self.double_sources))
# logging.info('self.double_sources_positions_ratios'+str(self.double_sources_positions_ratios))
# logging.info('self.npix'+str(self.npix))
# logging.info('self.list_of_defocuses_input_long'+str(list_of_defocuses_input_long))
# logging.info('self.fit_for_flux'+str(self.fit_for_flux))
# logging.info('self.test_run'+str(self.test_run))
# logging.info('self.list_of_psf_positions'+str(self.list_of_psf_positions))
model_multi = LN_PFS_multi_same_spot(
list_of_sci_images,
list_of_var_images,
list_of_mask_images=list_of_mask_images,
wavelength=self.wavelength,
dithering=self.dithering,
save=self.save,
zmax=self.zmax,
verbosity=self.verbosity_model,
double_sources=self.double_sources,
double_sources_positions_ratios=self.double_sources_positions_ratios,
npix=self.npix,
list_of_defocuses=list_of_defocuses_input_long,
fit_for_flux=self.fit_for_flux,
test_run=self.test_run,
list_of_psf_positions=self.list_of_psf_positions)
if self.verbosity >= 1:
logging.info('****************************')
logging.info('Starting Tokovinin procedure with num_iter: ' + str(num_iter))
logging.info('Initial testing proposal is: ' + str(allparameters_parametrization_proposal))
time_start_single = time.time()
# create list of minchains, one per each image
list_of_minchain = model_multi.create_list_of_allparameters(
allparameters_parametrization_proposal,
list_of_defocuses=list_of_defocuses_input_long,
zmax=self.zmax)
# if the parametrization is 2d array, move it into 1d shape
if len(allparameters_parametrization_proposal.shape) == 2:
allparameters_parametrization_proposal = move_parametrizations_from_2d_shape_to_1d_shape(
allparameters_parametrization_proposal)
if self.verbosity >= 1:
logging.info('Starting premodel analysis with num_iter: ' + str(num_iter))
# results from initial run, before running fitting algorithm
# pre_model_result - mean likelihood across all images, renormalized
# model_results - likelihood per image, not renormalized
# pre_images - list of created model images
# pre_input_parameters - list of parameters per image?
# chi_2_before_iteration_array - list of lists describing quality of fitting
# list_of_psf_positions - list of psf positions per image
try:
# logging.info('len(list_of_minchain): '+str(len(list_of_minchain)))
# logging.info('list_of_minchain[0] '+str(list_of_minchain[0]))
# logging.info('multi_background_factor: '+str(multi_background_factor))
# logging.info('type'+str(type(multi_background_factor)))
# logging.info('up_to_which_z: '+str(up_to_which_z))
# logging.info(str( list_of_minchain))
# logging.info('use_only_chi: '+str( use_only_chi))
# logging.info('list_of_minchain: '+str( list_of_minchain))
pre_model_result, model_results, pre_images, pre_input_parameters, chi_2_before_iteration_array,\
list_of_psf_positions =\
model_multi(list_of_minchain, return_Images=True, use_only_chi=use_only_chi,
multi_background_factor=multi_background_factor)
# modify variance image according to the models that have just been created
# first time modifying variance image
list_of_single_model_image = pre_images
list_of_var_images_via_model = []
for index_of_single_image in range(len(list_of_sci_images)):
popt = create_popt_for_custom_var(self.list_of_sci_images[index_of_single_image],
self.list_of_var_images[index_of_single_image],
self.list_of_mask_images[index_of_single_image])
single_var_image_via_model =\
create_custom_var_from_popt(list_of_single_model_image[index_of_single_image], popt)
list_of_var_images_via_model.append(single_var_image_via_model)
# replace the variance images provided with these custom variance images
list_of_var_images = list_of_var_images_via_model
# self.list_of_var_images = list_of_var_images
except Exception as e:
logging.info('Exception is: ' + str(e))
logging.info('Exception type is: ' + str(repr(e)))
logging.info(traceback.format_exc())
if self.verbosity >= 1:
logging.info('Premodel analysis failed')
# if the modelling failed
# return a 15-element tuple to be consistent with what would be returned if the algorithm passed
# at positions 0 and 1 return an extremely low likelihood to indicate failure
# at positions 7 and 8 return the input parametrization
# return -9999999,np.nan,np.nan,allparameters_parametrization_proposal,np.nan,np.nan,np.nan
return -9999999, -9999999, np.nan, np.nan, np.nan, np.nan, np.nan,\
allparameters_parametrization_proposal, allparameters_parametrization_proposal,\
np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
if self.verbosity >= 1:
logging.info('list_of_psf_positions at the input stage: ' + str(np.array(list_of_psf_positions)))
if self.save:
np.save('/tigress/ncaplar/Results/allparameters_parametrization_proposal_' + str(num_iter),
allparameters_parametrization_proposal)
np.save('/tigress/ncaplar/Results/pre_images_' + str(num_iter),
pre_images)
np.save('/tigress/ncaplar/Results/pre_input_parameters_' + str(num_iter),
pre_input_parameters)
np.save('/tigress/ncaplar/Results/list_of_sci_images_' + str(num_iter),
list_of_sci_images)
np.save('/tigress/ncaplar/Results/list_of_var_images_' + str(num_iter),
list_of_var_images)
np.save('/tigress/ncaplar/Results/list_of_mask_images_' + str(num_iter),
list_of_mask_images)
# extract the parameters which will not change in this function, i.e., non-wavefront parameters
nonwavefront_par = list_of_minchain[0][19:42]
time_end_single = time.time()
if self.verbosity >= 1:
logging.info('Total time taken for premodel analysis with num_iter ' + str(num_iter)
+ ' was ' + str(time_end_single - time_start_single) + ' seconds')
logging.info('chi_2_before_iteration is: ' + str(chi_2_before_iteration_array))
logging.info('Ended premodel analysis ')
logging.info('***********************')
# import science images and determine the flux mask
list_of_mean_value_of_background = []
list_of_flux_mask = []
list_of_sci_image_std = []
for i in range(len(list_of_sci_images)):
sci_image = list_of_sci_images[i]
var_image = list_of_var_images[i]
# do not use the passed multi_background_factor for images in focus or near focus
# (identifying them via the image shape probably needs to be done better)
if sci_image.shape[0] == 20:
multi_background_factor = 3
mean_value_of_background_via_var = np.mean([np.median(var_image[0]), np.median(
var_image[-1]), np.median(var_image[:, 0]),
np.median(var_image[:, -1])]) * multi_background_factor
mean_value_of_background_via_sci = np.mean([np.median(sci_image[0]), np.median(
sci_image[-1]), np.median(sci_image[:, 0]),
np.median(sci_image[:, -1])]) * multi_background_factor
mean_value_of_background = np.max(
[mean_value_of_background_via_var, mean_value_of_background_via_sci])
if self.verbosity > 1:
logging.info(
str(multi_background_factor) + 'x mean_value_of_background in image with index'
+ str(i) + ' is estimated to be: ' + str(mean_value_of_background))
list_of_mean_value_of_background.append(mean_value_of_background)
list_of_flux_mask = []
for i in range(len(list_of_sci_images)):
sci_image = list_of_sci_images[i]
var_image = list_of_var_images[i]
flux_mask = sci_image > (list_of_mean_value_of_background[i])
# normalized science image
sci_image_std = sci_image / np.sqrt(var_image)
list_of_sci_image_std.append(sci_image_std)
list_of_flux_mask.append(flux_mask)
# find positions of the in-focus image inside the raveled images
if len(list_of_flux_mask) > 1:
len_of_flux_masks = np.array(list(map(np.sum, list_of_flux_mask)))
position_of_most_focus_image = np.where(len_of_flux_masks == np.min(len_of_flux_masks))[0][0]
position_focus_1 = np.sum(len_of_flux_masks[:position_of_most_focus_image])
position_focus_2 = np.sum(len_of_flux_masks[:position_of_most_focus_image + 1])
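# worked example for the bookkeeping above (hypothetical mask sums): with
# len_of_flux_masks = [300, 120, 310] the most-focused image has index 1
# (smallest flux mask), so position_focus_1 = 300 and position_focus_2 = 420
# mark where its pixels sit inside the raveled, concatenated ``uber'' arrays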
self.list_of_flux_mask = list_of_flux_mask
self.list_of_sci_image_std = list_of_sci_image_std
##########################################################################
# masked science image
list_of_I = []
list_of_I_std = []
list_of_std_image = []
for i in range(len(list_of_sci_images)):
sci_image = list_of_sci_images[i]
sci_image_std = list_of_sci_image_std[i]
flux_mask = list_of_flux_mask[i]
std_image = np.sqrt(list_of_var_images[i][flux_mask]).ravel()
# using variable name `I` to match the original source paper
I = sci_image[flux_mask].ravel() # noqa: E741
# I=((sci_image[flux_mask])/np.sum(sci_image[flux_mask])).ravel()
I_std = ((sci_image_std[flux_mask]) / 1).ravel()
# I_std=((sci_image_std[flux_mask])/np.sum(sci_image_std[flux_mask])).ravel()
list_of_I.append(I)
list_of_std_image.append(std_image)
list_of_I_std.append(I_std)
# addition May22
# array_of_sci_image_std = np.array(list_of_sci_image_std)
list_of_std_sum = []
for i in range(len(list_of_sci_image_std)):
list_of_std_sum.append(np.sum(list_of_std_image[i]))
array_of_std_sum = np.array(list_of_std_sum)
array_of_std_sum = array_of_std_sum / np.min(array_of_std_sum)
list_of_std_image_renormalized = []
for i in range(len(list_of_std_image)):
list_of_std_image_renormalized.append(list_of_std_image[i] * array_of_std_sum[i])
#
uber_std = [item for sublist in list_of_std_image_renormalized for item in sublist]
# join all I,I_std from all individual images into one uber I,I_std
uber_I = [item for sublist in list_of_I for item in sublist]
# uber_std=[item for sublist in list_of_std_image for item in sublist]
# uber_I_std=[item for sublist in list_of_I_std for item in sublist]
uber_I = np.array(uber_I)
uber_std = np.array(uber_std)
uber_I_std = uber_I / uber_std
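# worked example of the renormalization above (hypothetical sums): if the per-image
# std sums are [100., 50., 200.], array_of_std_sum becomes [2., 1., 4.] after
# division by the minimum; images with larger total noise get their std inflated
# and are therefore downweighted in uber_I_std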
if self.save:
np.save('/tigress/ncaplar/Results/list_of_sci_images_' + str(num_iter),
list_of_sci_images)
np.save('/tigress/ncaplar/Results/list_of_mean_value_of_background_' + str(num_iter),
list_of_mean_value_of_background)
np.save('/tigress/ncaplar/Results/list_of_flux_mask_' + str(num_iter),
list_of_flux_mask)
np.save('/tigress/ncaplar/Results/uber_std_' + str(num_iter),
uber_std)
np.save('/tigress/ncaplar/Results/uber_I_' + str(num_iter),
uber_I)
# March 14, 2022: if up_to_which_z is False, skip the run entirely and return the premodel results
if up_to_which_z is False:
# 0. likelihood averaged over all images (before the function)
# 1. likelihood averaged over all images (before the function)
# 2. likelihood per image (output from model_multi) (before the function)
# 3. likelihood per image (output from model_multi) (before the function)
# 4. out_images
# 5. list of initial model images
# 6. list of initial model images
# 7. parametrization before the function
# 8. parametrization after the function
# 9. list of parameters per image (before the function)
# 10. list of parameters per image (after the function)
# 11. list of chi2 per image (before the function)
# 12. list of chi2 per image (after the function)
# 13. list of psf position of image (before the function)
# 14. list of psf position of image (after the function)
initial_model_result, list_of_initial_model_result, list_of_image_0,\
list_of_initial_input_parameters, list_of_pre_chi2, list_of_psf_positions =\
pre_model_result, model_results, pre_images, pre_input_parameters,\
chi_2_before_iteration_array, list_of_psf_positions
if previous_best_result is None:
return initial_model_result, initial_model_result,\
list_of_initial_model_result, list_of_initial_model_result,\
None, pre_images, pre_images,\
allparameters_parametrization_proposal,\
allparameters_parametrization_proposal,\
list_of_initial_input_parameters, list_of_initial_input_parameters,\
list_of_pre_chi2, list_of_pre_chi2,\
list_of_psf_positions, list_of_psf_positions,\
[None, None, None, None, None]
else:
return initial_model_result, initial_model_result,\
list_of_initial_model_result, list_of_initial_model_result,\
None, pre_images, pre_images,\
allparameters_parametrization_proposal,\
allparameters_parametrization_proposal,\
list_of_initial_input_parameters, list_of_initial_input_parameters,\
list_of_pre_chi2, list_of_pre_chi2,\
list_of_psf_positions, list_of_psf_positions
# set number of extra Zernike
# number_of_extra_zernike=0
# twentytwo_or_extra=22
# numbers that make sense are 11,22,37,56,79,106,137,172,211,254
# if number_of_extra_zernike is None:
# number_of_extra_zernike=0
# else:
number_of_extra_zernike = self.zmax - 22
##########################################################################
# Start of the iterative process
number_of_non_decreses = [0]
for iteration_number in range(1):
if iteration_number == 0:
# initial SVD threshold
thresh0 = 0.02
else:
pass
##########################################################################
# starting real iterative process here
# create changes in parametrizations
# list of how much to move Zernike coefficients
# list_of_delta_z=[]
# for z_par in range(3,22+number_of_extra_zernike):
# list_of_delta_z.append(0.5/((np.sqrt(8.*(z_par+1.)-6.)-1.)/2.))
# list of how much to move Zernike coefficients
# possibly needs to be modified to be smarter and take into account that
# every second parameter gets ``amplified'' in defocus
# list_of_delta_z_parametrizations=[]
# for z_par in range(0,19*2+2*number_of_extra_zernike):
# list_of_delta_z_parametrizations.append(0.5/((np.sqrt(8.*(z_par+1.)-6.)-1.)/2.))
# this should produce reasonable changes in multi analysis
list_of_delta_z_parametrizations = []
for z_par in range(0, 19 * 2 + 2 * number_of_extra_zernike):
z_par_i = z_par + 4
# if this is the parameter that changes with defocus (the slope gets a smaller step)
if np.mod(z_par_i, 2) == 0:
list_of_delta_z_parametrizations.append(0.1 * 0.05 / np.sqrt(z_par_i))
if np.mod(z_par_i, 2) == 1:
list_of_delta_z_parametrizations.append(0.05 / np.sqrt(z_par_i))
array_of_delta_z_parametrizations = np.array(list_of_delta_z_parametrizations) * (1)
if iteration_number == 0:
pass
else:
# array_of_delta_z_parametrizations=first_proposal_Tokovnin/4
array_of_delta_z_parametrizations = np.maximum(
array_of_delta_z_parametrizations, first_proposal_Tokovnin / 4) # noqa
# this code might work with global parameters?
array_of_delta_global_parametrizations = np.array([0.1, 0.02, 0.1, 0.1, 0.1, 0.1,
0.3, 1, 0.1, 0.1,
0.15, 0.15, 0.1,
0.07, 0.05, 0.05, 0.4,
30000, 0.5, 0.001,
0.05, 0.05, 0.01])
# array_of_delta_global_parametrizations=array_of_delta_global_parametrizations/1
array_of_delta_global_parametrizations = array_of_delta_global_parametrizations / 10
if move_allparameters:
array_of_delta_all_parametrizations = np.concatenate(
(array_of_delta_z_parametrizations[0:19 * 2], array_of_delta_global_parametrizations,
array_of_delta_z_parametrizations[19 * 2:]))
if self.save:
np.save('/tigress/ncaplar/Results/array_of_delta_z_parametrizations_'
+ str(num_iter) + '_' + str(iteration_number), array_of_delta_z_parametrizations)
np.save('/tigress/ncaplar/Results/array_of_delta_global_parametrizations_'
+ str(num_iter) + '_' + str(iteration_number), array_of_delta_global_parametrizations)
if move_allparameters:
np.save('/tigress/ncaplar/Results/array_of_delta_all_parametrizations_'
+ str(num_iter) + '_' + str(iteration_number),
array_of_delta_all_parametrizations)
# initialize
# if this is the first iteration of the iterative algorithm
if iteration_number == 0:
thresh = thresh0
all_global_parametrization_old = allparameters_parametrization_proposal[19 * 2:19 * 2 + 23]
if number_of_extra_zernike == 0:
all_wavefront_z_parametrization_old = allparameters_parametrization_proposal[0:19 * 2]
else:
# if you want more Zernike
if len(allparameters_parametrization_proposal) == 19 * 2 + 23:
# if you did not pass explicit extra Zernike, start with zeroes
all_wavefront_z_parametrization_old = np.concatenate(
(allparameters_parametrization_proposal[0:19 * 2],
np.zeros(2 * number_of_extra_zernike)))
else:
all_wavefront_z_parametrization_old = np.concatenate(
(allparameters_parametrization_proposal[0:19 * 2],
allparameters_parametrization_proposal[19 * 2 + 23:]))
pass
# if this is not a first iteration
else:
# errors in the typechecker for 10 lines below are fine
if self.verbosity == 1:
logging.info('array_of_delta_z in ' + str(iteration_number) + ' '
+ str(array_of_delta_z_parametrizations))
# code analysis programs might suggest that there is an error here, but everything is ok
# chi_2_before_iteration=np.copy(chi_2_after_iteration)
# copy wavefront from the end of the previous iteration
all_wavefront_z_parametrization_old = np.copy(all_wavefront_z_parametrization_new) # noqa
if move_allparameters:
all_global_parametrization_old = np.copy(all_global_parametrization_new) # noqa
if self.verbosity >= 1:
if did_chi_2_improve == 1: # noqa
logging.info('did_chi_2_improve: yes')
else:
logging.info('did_chi_2_improve: no')
if did_chi_2_improve == 0: # noqa
thresh = thresh0
else:
thresh = thresh * 0.5
##########################################################################
# create a model with input parameters from previous iteration
list_of_all_wavefront_z_parameterization = []
up_to_z22_parametrization_start = all_wavefront_z_parametrization_old[0:19 * 2]
from_z22_parametrization_start = all_wavefront_z_parametrization_old[19 * 2:]
global_parametrization_start = all_global_parametrization_old
if self.verbosity >= 1:
logging.info('up_to_z22_parametrization_start: ' + str(up_to_z22_parametrization_start))
logging.info('nonwavefront_par: ' + str(nonwavefront_par))
logging.info('from_z22_parametrization_start' + str(from_z22_parametrization_start))
# logging.info('iteration '+str(iteration_number)+' shape of up_to_z22_parametrization_start is:
# '+str(up_to_z22_parametrization_start.shape))
if move_allparameters:
initial_input_parameterization = np.concatenate(
(up_to_z22_parametrization_start, global_parametrization_start,
from_z22_parametrization_start))
else:
initial_input_parameterization = np.concatenate(
(up_to_z22_parametrization_start, nonwavefront_par, from_z22_parametrization_start))
if self.verbosity >= 1:
logging.info(
'initial input parameters in iteration ' + str(iteration_number) + ' are: '
+ str(initial_input_parameterization))
logging.info(
'moving input wavefront parameters in iteration ' + str(iteration_number) + ' by: '
+ str(array_of_delta_z_parametrizations))
if move_allparameters:
logging.info(
'moving global input parameters in iteration ' + str(iteration_number) + ' by: '
+ str(array_of_delta_global_parametrizations))
if self.save:
np.save('/tigress/ncaplar/Results/initial_input_parameterization_'
+ str(num_iter) + '_' + str(iteration_number), initial_input_parameterization)
# logging.info('len initial_input_parameterization '+str(len(initial_input_parameterization)))
list_of_minchain = model_multi.create_list_of_allparameters(
initial_input_parameterization, list_of_defocuses=list_of_defocuses_input_long,
zmax=self.zmax)
# list_of_minchain=model_multi.create_list_of_allparameters(allparameters_parametrization_proposal,list_of_defocuses=list_of_defocuses_input_long,zmax=56)
        # moved in under the `else` statement
# res_multi=model_multi(list_of_minchain,return_Images=True,use_only_chi=use_only_chi,\
# multi_background_factor=multi_background_factor)
# if this is the first iteration take over the results from premodel run
if iteration_number == 0:
initial_model_result, list_of_initial_model_result, list_of_image_0,\
list_of_initial_input_parameters, list_of_pre_chi2, list_of_psf_positions =\
pre_model_result, model_results, pre_images, pre_input_parameters,\
chi_2_before_iteration_array, list_of_psf_positions
else:
res_multi = model_multi(list_of_minchain, return_Images=True, use_only_chi=use_only_chi,
multi_background_factor=multi_background_factor)
# mean_res_of_multi_same_spot_proposal,list_of_single_res_proposal,list_of_single_model_image_proposal,\
# list_of_single_allparameters_proposal,list_of_single_chi_results_proposal=res_multi
initial_model_result, list_of_initial_model_result, list_of_image_0,\
list_of_initial_input_parameters, list_of_pre_chi2, list_of_psf_positions = res_multi
# modify variance image according to the models that have just been created
# second time modifying variance image
list_of_single_model_image = list_of_image_0
list_of_var_images_via_model = []
for index_of_single_image in range(len(list_of_sci_images)):
popt = create_popt_for_custom_var(self.list_of_sci_images[index_of_single_image],
self.list_of_var_images[index_of_single_image],
self.list_of_mask_images[index_of_single_image])
single_var_image_via_model =\
create_custom_var_from_popt(list_of_single_model_image[index_of_single_image], popt)
list_of_var_images_via_model.append(single_var_image_via_model)
# replace the variance images provided with these custom variance images
list_of_var_images = list_of_var_images_via_model
# self.list_of_var_images = list_of_var_images
# initial_model_result,image_0,initial_input_parameters,pre_chi2=model(initial_input_parameters,return_Image=True,return_intermediate_images=False)
if self.save:
np.save('/tigress/ncaplar/Results/list_of_initial_model_result_'
+ str(num_iter) + '_' + str(iteration_number), list_of_initial_model_result)
np.save('/tigress/ncaplar/Results/list_of_image_0_' + str(num_iter) + '_'
+ str(iteration_number), list_of_image_0)
np.save('/tigress/ncaplar/Results/list_of_initial_input_parameters_'
+ str(num_iter) + '_' + str(iteration_number), list_of_initial_input_parameters)
np.save('/tigress/ncaplar/Results/list_of_pre_chi2_' + str(num_iter) + '_'
+ str(iteration_number), list_of_pre_chi2)
np.save('/tigress/ncaplar/Results/list_of_psf_positions_' + str(num_iter) + '_'
+ str(iteration_number), list_of_psf_positions)
##########################################################################
# divided model images by their standard deviations
list_of_image_0_std = []
for i in range(len(list_of_image_0)):
# normalizing by standard deviation image
# May 22 modification
STD = np.sqrt(list_of_var_images[i]) * array_of_std_sum[i]
image_0 = list_of_image_0[i]
list_of_image_0_std.append(image_0 / STD)
##########################################################################
# updated science images divided by std (given that we created new custom
# variance images, via model)
##########################################################################
# mask model images at the start of this iteration, before modifying parameters
# create uber_M0
list_of_M0 = []
list_of_M0_std = []
for i in range(len(list_of_image_0_std)):
image_0 = list_of_image_0[i]
image_0_std = list_of_image_0_std[i]
flux_mask = list_of_flux_mask[i]
# what is list_of_mask_images?
M0 = image_0[flux_mask].ravel()
# M0=((image_0[flux_mask])/np.sum(image_0[flux_mask])).ravel()
M0_std = ((image_0_std[flux_mask]) / 1).ravel()
# M0_std=((image_0_std[flux_mask])/np.sum(image_0_std[flux_mask])).ravel()
list_of_M0.append(M0)
list_of_M0_std.append(M0_std)
        # join all M0, M0_std from individual images into one uber M0, M0_std
uber_M0 = [item for sublist in list_of_M0 for item in sublist]
uber_M0_std = [item for sublist in list_of_M0_std for item in sublist]
uber_M0 = np.array(uber_M0)
uber_M0_std = np.array(uber_M0_std)
# uber_M0=uber_M0/np.sum(uber_M0)
# uber_M0_std=uber_M0_std/np.sum(uber_M0_std)
self.uber_M0 = uber_M0
self.uber_M0_std = uber_M0_std
if self.save:
np.save('/tigress/ncaplar/Results/uber_M0_' + str(num_iter) + '_' + str(iteration_number),
uber_M0)
np.save('/tigress/ncaplar/Results/uber_M0_std_' + str(num_iter) + '_' + str(iteration_number),
uber_M0_std)
##########################################################################
# difference between model (uber_M0) and science (uber_I) at start of this iteration
# non-std version
# not used, that is ok, we are at the moment using std version
IM_start = np.sum(np.abs(np.array(uber_I) - np.array(uber_M0)))
# std version
IM_start_std = np.sum(np.abs(np.array(uber_I_std) - np.array(uber_M0_std)))
if len(list_of_flux_mask) > 1:
IM_start_focus = np.sum(
np.abs(np.array(uber_I) - np.array(uber_M0))[position_focus_1:position_focus_2])
IM_start_std_focus = np.sum(
np.abs(np.array(uber_I_std) - np.array(uber_M0_std))[position_focus_1:position_focus_2])
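        # Quality-metric sketch (illustrative, not executed): the scalar tracked
        # across iterations is an L1 distance between data and model,
        #   IM = np.sum(np.abs(I - M)),
        # optionally weighted by the per-pixel standard deviation (the *_std
        # variants) and optionally restricted to the in-focus slice when several
        # defocused images are present.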
        # mean of the differences of our images - should we use the mean? probably not... it needs to be normalized
unitary_IM_start = np.mean(IM_start)
unitary_IM_start_std = np.mean(IM_start_std)
# logging.info list_of_IM_start_std
if self.verbosity == 1:
logging.info('np.sum(np.abs(I-M0)) before iteration ' + str(num_iter)
+ '_' + str(iteration_number) + ': ' + str(unitary_IM_start))
logging.info('np.sum(np.abs(I_std-M0_std)) before iteration ' + str(num_iter)
+ '_' + str(iteration_number) + ': ' + str(unitary_IM_start_std))
# logging.info('np.sum(np.abs(I_std-M0_std)) before iteration '+str(iteration_number)+':
# '+str(unitary_IM_start_std))
##########################################################################
# create list of new parametrizations to be tested
# combine the old wavefront parametrization with the delta_z_parametrization
# create two lists:
# 1. one contains only wavefront parametrizations
# 2. second contains the whole parametrizations
# logging.info('checkpoint 0')
if move_allparameters:
list_of_all_wavefront_z_parameterization = []
list_of_input_parameterizations = []
for z_par in range(19 * 2):
all_wavefront_z_parametrization_list = np.copy(all_wavefront_z_parametrization_old)
all_wavefront_z_parametrization_list[z_par] =\
all_wavefront_z_parametrization_list[z_par] + \
array_of_delta_z_parametrizations[z_par]
list_of_all_wavefront_z_parameterization.append(all_wavefront_z_parametrization_list)
up_to_z22_start = all_wavefront_z_parametrization_list[0:19 * 2]
from_z22_start = all_wavefront_z_parametrization_list[19 * 2:]
parametrization_proposal = np.concatenate(
(up_to_z22_start, nonwavefront_par, from_z22_start))
# actually it is parametrization
list_of_input_parameterizations.append(parametrization_proposal)
# logging.info('checkpoint 1')
for g_par in range(23):
all_global_parametrization_list = np.copy(all_global_parametrization_old)
all_global_parametrization_list[g_par] = all_global_parametrization_list[g_par] + \
array_of_delta_global_parametrizations[g_par]
# list_of_all_wavefront_z_parameterization.append(all_wavefront_z_parametrization_list)
up_to_z22_start = all_wavefront_z_parametrization_old[0:19 * 2]
from_z22_start = all_wavefront_z_parametrization_old[19 * 2:]
parametrization_proposal = np.concatenate(
(up_to_z22_start, all_global_parametrization_list, from_z22_start))
# actually it is parametrization
list_of_input_parameterizations.append(parametrization_proposal)
# logging.info('checkpoint 2')
for z_par in range(19 * 2, len(all_wavefront_z_parametrization_old)):
all_wavefront_z_parametrization_list = np.copy(all_wavefront_z_parametrization_old)
all_wavefront_z_parametrization_list[z_par] =\
all_wavefront_z_parametrization_list[z_par] + \
array_of_delta_z_parametrizations[z_par]
list_of_all_wavefront_z_parameterization.append(all_wavefront_z_parametrization_list)
up_to_z22_start = all_wavefront_z_parametrization_list[0:19 * 2]
from_z22_start = all_wavefront_z_parametrization_list[19 * 2:]
parametrization_proposal = np.concatenate(
(up_to_z22_start, nonwavefront_par, from_z22_start))
# actually it is parametrization
list_of_input_parameterizations.append(parametrization_proposal)
# logging.info('checkpoint 3')
else:
list_of_all_wavefront_z_parameterization = []
list_of_input_parameterizations = []
for z_par in range(len(all_wavefront_z_parametrization_old)):
all_wavefront_z_parametrization_list = np.copy(all_wavefront_z_parametrization_old)
all_wavefront_z_parametrization_list[z_par] =\
all_wavefront_z_parametrization_list[z_par] + \
array_of_delta_z_parametrizations[z_par]
list_of_all_wavefront_z_parameterization.append(all_wavefront_z_parametrization_list)
up_to_z22_start = all_wavefront_z_parametrization_list[0:19 * 2]
from_z22_start = all_wavefront_z_parametrization_list[19 * 2:]
parametrization_proposal = np.concatenate(
(up_to_z22_start, nonwavefront_par, from_z22_start))
# actually it is parametrization
list_of_input_parameterizations.append(parametrization_proposal)
# logging.info('checkpoint 4')
##########################################################################
# Starting testing new set of parameters
# Creating new images
out_ln = []
out_ln_ind = []
out_images = []
out_parameters = []
out_chi2 = []
out_pfs_positions = []
if self.verbosity >= 1:
logging.info(
'We are now inside of the pool loop number ' + str(iteration_number)
+ ' with num_iter: ' + str(num_iter))
# actually it is parametrization
# list of (56-3)*2 sublists, each one with (56-3)*2 + 23 values
time_start = time.time()
        # This assumes that Zernike parameters go up to 56
# I need to pass each of 106 parametrization to model_multi BUT
# model_multi actually takes list of parameters, not parametrizations
# I need list that has 106 sublists, each one of those being 9x(53+23)
# 9 == number of images
# 53 == number of Zernike parameters (56-3)
# 23 == number of global parameters
uber_list_of_input_parameters = []
for i in range(len(list_of_input_parameterizations)):
list_of_input_parameters = model_multi.create_list_of_allparameters(
list_of_input_parameterizations[i],
list_of_defocuses=list_of_defocuses_input_long, zmax=self.zmax)
uber_list_of_input_parameters.append(list_of_input_parameters)
# save the uber_list_of_input_parameters
if self.save:
np.save('/tigress/ncaplar/Results/uber_list_of_input_parameters_'
+ str(num_iter) + '_' + str(iteration_number), uber_list_of_input_parameters)
# pass new model_multi that has fixed pos (October 6, 2020)
        # should have the same parameters as the starting model_multi, apart from
        # list_of_psf_positions (maybe variance?, but probably not)
model_multi_out = LN_PFS_multi_same_spot(
list_of_sci_images,
list_of_var_images,
list_of_mask_images=list_of_mask_images,
wavelength=self.wavelength,
dithering=self.dithering,
save=self.save,
zmax=self.zmax,
verbosity=self.verbosity_model,
double_sources=self.double_sources,
double_sources_positions_ratios=double_sources_positions_ratios,
npix=self.npix,
fit_for_flux=self.fit_for_flux,
test_run=self.test_run,
list_of_psf_positions=list_of_psf_positions)
if move_allparameters:
self.array_of_delta_all_parametrizations = array_of_delta_all_parametrizations
else:
self.array_of_delta_z_parametrizations = array_of_delta_z_parametrizations
# start of creating H
# H is normalized difference between pixels of the model image
# that result from changing the j-th Zernike term compared to the original image
# This is expensive because we have to generate new image for each Zernike term
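        # Illustrative sketch (assumption, not executed): each column of H is a
        # finite-difference derivative of the flattened model with respect to one
        # parameter,
        #   H[:, j] ~ (model(p + delta[j] * e_j).ravel() - M0) / delta[j]
        # so H plays the role of the Jacobian in the linearized problem
        # I - M0 ~ H @ a, which is solved for the parameter step a below.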
if previous_best_result is None:
if self.verbosity >= 1:
logging.info('self.pool parameter is: ' + str(self.pool))
# generate images
if self.pool is None:
out1 = map(
partial(
model_multi_out,
return_Images=True,
use_only_chi=use_only_chi,
multi_background_factor=multi_background_factor),
uber_list_of_input_parameters)
else:
out1 = self.pool.map(
partial(
model_multi_out,
return_Images=True,
use_only_chi=use_only_chi,
multi_background_factor=multi_background_factor),
uber_list_of_input_parameters)
out1 = list(out1)
time_end = time.time()
if self.verbosity >= 1:
logging.info('time_end-time_start for creating model_multi_out '
+ str(time_end - time_start))
# normalization of the preinput run? (what did I mean by that)
pre_input_parameters = np.array(pre_input_parameters)
if self.verbosity >= 1:
logging.info('pre_input_parameters.shape ' + str(pre_input_parameters.shape))
logging.info('pre_input_parameters[0][0:5] ' + str(pre_input_parameters[0][0:5]))
# select the column specifying the flux normalization from the input images
array_of_normalizations_pre_input = pre_input_parameters[:, 41]
# out1=a_pool.map(model,input_parameters,repeat(True))
for i in range(len(uber_list_of_input_parameters)):
# logging.info(i)
# initial_model_result,list_of_initial_model_result,list_of_image_0,\
# list_of_initial_input_parameters,list_of_pre_chi2
# outputs are
# 0. mean likelihood
# 1. list of individual res (likelihood)
# 2. list of science images
# 3. list of parameters used
                # 4. list of quality measurements
out_images_pre_renormalization = np.array(out1[i][2])
out_parameters_single_move = np.array(out1[i][3])
                # replace the normalizations in the output images with the normalizations
                # from the input images
array_of_normalizations_out = out_parameters_single_move[:, 41]
out_renormalization_parameters = array_of_normalizations_pre_input /\
array_of_normalizations_out
out_ln.append(out1[i][0])
out_ln_ind.append(out1[i][1])
# logging.info('out_images_pre_renormalization.shape: '+
# str(out_images_pre_renormalization.shape))
# logging.info('out_renormalization_parameters.shape: '+
# str(out_renormalization_parameters.shape))
# np.save('/tigress/ncaplar/Results/out_images_pre_renormalization',
# out_images_pre_renormalization)
out_images_step = []
for lv in range(len(out_renormalization_parameters)):
out_images_step.append(
out_images_pre_renormalization[lv]
* out_renormalization_parameters[lv])
out_images.append(out_images_step)
out_parameters.append(out1[i][3])
out_chi2.append(out1[i][4])
out_pfs_positions.append(out1[i][5])
            # We use these out_images to study the differences due to changing parameters;
            # we do not want the normalization (or the position of the optical center) to
            # affect things, so we renormalize so that the multiplication constants are
            # the same as in the input
time_end = time.time()
if self.verbosity >= 1:
logging.info('time_end-time_start for whole model_multi_out '
+ str(time_end - time_start))
if self.save:
np.save(
'/tigress/ncaplar/Results/out_images_' + str(num_iter) + '_'
+ str(iteration_number), out_images)
np.save(
'/tigress/ncaplar/Results/out_parameters_' + str(num_iter) + '_'
+ str(iteration_number), out_parameters)
np.save(
'/tigress/ncaplar/Results/out_chi2_' + str(num_iter) + '_'
+ str(iteration_number), out_chi2)
##########################################################################
# Normalize created images
# We created ((zmax-3)*2) x N images, where N is the number of defocused images
# join all images together
list_of_images_normalized_uber = []
# list_of_images_normalized_std_uber = []
# go over (zmax-3)*2 images
for j in range(len(out_images)):
            # two steps for what could have been achieved in one, but kept to ease the
            # transition from the previous code
out_images_single_parameter_change = out_images[j]
optpsf_list = out_images_single_parameter_change
            # the flux has to be corrected per image
# mask images that have been created in the fitting procedure with the
# appropriate flux mask
images_normalized = []
for i in range(len(optpsf_list)):
flux_mask = list_of_flux_mask[i]
images_normalized.append((optpsf_list[i][flux_mask]).ravel())
images_normalized_flat = [item for sublist in images_normalized for item in sublist]
images_normalized_flat = np.array(images_normalized_flat)
# list of (zmax-3)*2 raveled images
list_of_images_normalized_uber.append(images_normalized_flat)
# same but divided by STD
# images_normalized_std=[]
# for i in range(len(optpsf_list)):
# seems that I am a bit more verbose here with my definitions
# optpsf_list_i=optpsf_list[i]
# do I want to generate new STD images, from each image?
# May 22 modification
# STD=list_of_sci_image_std[i]*array_of_std_sum[i]
# optpsf_list_i_STD=optpsf_list_i/STD
# flux_mask=list_of_flux_mask[i]
# images_normalized_std.append((optpsf_list_i_STD[flux_mask]/np.sum(optpsf_list_i_STD[flux_mask])).ravel())
# join all images together
# images_normalized_std_flat=
# [item for sublist in images_normalized_std for item in sublist]
# normalize so that the sum is still one
# images_normalized_std_flat=np.array(images_normalized_std_flat)/len(optpsf_list)
# list_of_images_normalized_std_uber.append(images_normalized_std_flat)
# create uber images_normalized,images_normalized_std
        # images that have (zmax-3)*2 rows and very large number of columns (number of
# non-masked pixels from all N images)
uber_images_normalized = np.array(list_of_images_normalized_uber)
# uber_images_normalized_std=np.array(list_of_images_normalized_std_uber)
if self.save:
np.save(
'/tigress/ncaplar/Results/uber_images_normalized_' + str(num_iter) + '_'
+ str(iteration_number), uber_images_normalized)
# np.save('/tigress/ncaplar/Results/uber_images_normalized_std_'+str(num_iter)+'_'+str(iteration_number),\
# uber_images_normalized_std)
# single_wavefront_parameter_list=[]
# for i in range(len(out_parameters)):
# single_wavefront_parameter_list.
# append(np.concatenate((out_parameters[i][:19],out_parameters[i][42:])) )
##########################################################################
# Core Tokovinin algorithm
if self.verbosity >= 1:
logging.info('images_normalized (uber).shape: ' + str(uber_images_normalized.shape))
logging.info('array_of_delta_z_parametrizations[:,None].shape'
+ str(array_of_delta_z_parametrizations[:, None].shape))
# equation A1 from Tokovinin 2006
# new model minus old model
if move_allparameters:
H = np.transpose(np.array((uber_images_normalized - uber_M0))
/ array_of_delta_all_parametrizations[:, None])
# H_std=np.transpose(np.array((uber_images_normalized_std-uber_M0_std))/\
# array_of_delta_z_parametrizations[:,None])
H_std = np.transpose(np.array((uber_images_normalized - uber_M0))
/ array_of_delta_all_parametrizations[:, None]) /\
uber_std.ravel()[:, None]
else:
H = np.transpose(np.array((uber_images_normalized - uber_M0))
/ array_of_delta_z_parametrizations[:, None])
# H_std=np.transpose(np.array((uber_images_normalized_std-uber_M0_std))/array_of_delta_z_parametrizations[:,None])
H_std = np.transpose(np.array((uber_images_normalized - uber_M0))
/ array_of_delta_z_parametrizations[:, None]) /\
uber_std.ravel()[:, None]
array_of_delta_z_parametrizations_None = np.copy(array_of_delta_z_parametrizations[:, None])
else:
H = self.create_simplified_H(previous_best_result)
H_std = H / uber_std.ravel()[:, None]
# end of creating H
if self.save and previous_best_result is None:
np.save('/tigress/ncaplar/Results/array_of_delta_z_parametrizations_None_'
+ str(num_iter) + '_' + str(iteration_number),
array_of_delta_z_parametrizations_None)
if self.save:
np.save('/tigress/ncaplar/Results/H_' + str(num_iter) + '_' + str(iteration_number), H)
if self.save:
np.save('/tigress/ncaplar/Results/H_std_' + str(num_iter) + '_' + str(iteration_number),
H_std)
first_proposal_Tokovnin, first_proposal_Tokovnin_std = self.create_first_proposal_Tokovnin(
H, H_std, uber_I, uber_M0, uber_std, up_to_which_z=up_to_which_z)
"""
#logging.info('np.mean(H,axis=0).shape)'+str(np.mean(H,axis=0).shape))
singlular_parameters=np.arange(H.shape[1])[np.abs((np.mean(H,axis=0)))<0.01]
non_singlular_parameters=np.arange(H.shape[1])[np.abs((np.mean(H,axis=0)))>0.01]
#logging.info('non_singlular_parameters.shape)'+str(non_singlular_parameters.shape))
H=H[:,non_singlular_parameters]
H_std=H_std[:,non_singlular_parameters]
HHt=np.matmul(np.transpose(H),H)
HHt_std=np.matmul(np.transpose(H_std),H_std)
#logging.info('svd thresh is '+str(thresh))
#invHHt=svd_invert(HHt,thresh)
#invHHt_std=svd_invert(HHt_std,thresh)
invHHt=np.linalg.inv(HHt)
invHHt_std=np.linalg.inv(HHt_std)
invHHtHt=np.matmul(invHHt,np.transpose(H))
invHHtHt_std=np.matmul(invHHt_std,np.transpose(H_std))
# I is uber_I now (science images)
# M0 is uber_M0 now (set of models before the iteration)
first_proposal_Tokovnin=np.matmul(invHHtHt,uber_I-uber_M0)
#first_proposal_Tokovnin_std=np.matmul(invHHtHt_std,uber_I_std-uber_M0_std)
first_proposal_Tokovnin_std=np.matmul(invHHtHt_std,(uber_I-uber_M0)/uber_std.ravel())
# if you have removed certain parameters because of the singularity,
return them here, with no change
if len(singlular_parameters)>0:
for i in range(len(singlular_parameters)):
first_proposal_Tokovnin=np.insert(first_proposal_Tokovnin,singlular_parameters[i],0)
first_proposal_Tokovnin_std=np.insert(first_proposal_Tokovnin_std,singlular_parameters[i],0)
#logging.info('first_proposal_Tokovnin_std'+str(first_proposal_Tokovnin_std.shape))
#logging.info('invHHtHt_std.shape'+str(invHHtHt_std.shape))
"""
if self.verbosity >= 1:
            logging.info('first_proposal_Tokovnin[:16] is: '
                         + str(first_proposal_Tokovnin[:8 * 2]))
            logging.info('first_proposal_Tokovnin_std[:16] is: '
                         + str(first_proposal_Tokovnin_std[:8 * 2]))
try:
                logging.info('ratio of proposed to initial parameters (std) is: '
                             + str(first_proposal_Tokovnin_std / array_of_delta_z_parametrizations))
except BaseException:
pass
# Tokovnin_proposal=0.7*first_proposal_Tokovnin
if move_allparameters:
Tokovnin_proposal = np.zeros((129,))
# Tokovnin_proposal[non_singlular_parameters]=0.7*first_proposal_Tokovnin_std
Tokovnin_proposal[non_singlular_parameters] = 1 * first_proposal_Tokovnin_std # noqa
all_parametrization_new = np.copy(initial_input_parameterization)
allparameters_parametrization_proposal_after_iteration_before_global_check =\
all_parametrization_new + Tokovnin_proposal
# tests if the global parameters would be out of bounds - if yes, reset
# them to the limit values
# noqa: E501 - breaking line limit in order to keep informative names
global_parametrization_proposal_after_iteration_before_global_check =\
allparameters_parametrization_proposal_after_iteration_before_global_check[19 * 2:19 * 2 + 23] # noqa: E501
checked_global_parameters = check_global_parameters(
global_parametrization_proposal_after_iteration_before_global_check, test_print=1)
allparameters_parametrization_proposal_after_iteration = np.copy(
allparameters_parametrization_proposal_after_iteration_before_global_check)
allparameters_parametrization_proposal_after_iteration[19 * 2:19 * 2 + 23] =\
checked_global_parameters
else:
# Tokovnin_proposal=0.7*first_proposal_Tokovnin_std
Tokovnin_proposal = 1 * first_proposal_Tokovnin_std
if self.verbosity >= 1:
logging.info('Tokovnin_proposal[:5] is: ' + str(Tokovnin_proposal[:5]))
if self.zmax > 35:
logging.info('Tokovnin_proposal[38:43] is: ' + str(Tokovnin_proposal[38:43]))
# logging.info('all_wavefront_z_parametrization_old in '+str(iteration_number)+' '+
# str(all_wavefront_z_parametrization_old[:5]))
# logging.info('Tokovnin_proposal[:5] is: '+str(Tokovnin_proposal[:5]))
# logging.info('Tokovnin_proposal.shape '+str(Tokovnin_proposal.shape))
# if the Tokovinin proposal is not made, return the initial result
if len(Tokovnin_proposal) < 10:
# return initial_model_result,list_of_initial_model_result,list_of_image_0,\
# allparameters_parametrization_proposal,list_of_initial_input_parameters,list_of_pre_chi2,list_of_psf_positions
return initial_model_result, initial_model_result,\
list_of_initial_model_result, list_of_initial_model_result,\
out_images, list_of_image_0, list_of_image_0,\
allparameters_parametrization_proposal, allparameters_parametrization_proposal,\
list_of_initial_input_parameters, list_of_initial_input_parameters,\
list_of_pre_chi2, list_of_pre_chi2,\
list_of_psf_positions, list_of_psf_positions
break
# logging.info('std of Tokovnin_proposal is: '+str(np.std(Tokovnin_proposal)))
if move_allparameters:
# all_wavefront_z_parametrization_new=np.copy(all_wavefront_z_parametrization_old)
# all_global_parametrization_new=np.copy(all_global_parametrization_old)
# all_parametrization_new=np.copy(initial_input_parameterization)
# allparameters_parametrization_proposal_after_iteration=all_parametrization_new+Tokovnin_proposal
up_to_z22_end = allparameters_parametrization_proposal_after_iteration[:19 * 2]
from_z22_end = allparameters_parametrization_proposal_after_iteration[19 * 2 + 23:]
all_wavefront_z_parametrization_new = np.concatenate((up_to_z22_end, from_z22_end))
# all_global_parametrization_new = allparameters_parametrization_proposal_after_iteration[
# 19 * 2:19 * 2 + 23]
else:
all_wavefront_z_parametrization_new = np.copy(all_wavefront_z_parametrization_old)
all_wavefront_z_parametrization_new = all_wavefront_z_parametrization_new + Tokovnin_proposal
up_to_z22_end = all_wavefront_z_parametrization_new[:19 * 2]
from_z22_end = all_wavefront_z_parametrization_new[19 * 2:]
allparameters_parametrization_proposal_after_iteration = np.concatenate(
(up_to_z22_end, nonwavefront_par, from_z22_end))
if self.save:
np.save(
'/tigress/ncaplar/Results/first_proposal_Tokovnin' + str(num_iter) + '_'
+ str(iteration_number), first_proposal_Tokovnin)
np.save(
'/tigress/ncaplar/Results/first_proposal_Tokovnin_std' + str(num_iter) + '_'
+ str(iteration_number), first_proposal_Tokovnin_std)
np.save(
'/tigress/ncaplar/Results/allparameters_parametrization_proposal_after_iteration_'
+ str(num_iter) + '_' + str(iteration_number),
allparameters_parametrization_proposal_after_iteration)
#########################
        # Creating single exposure with new proposed parameters and seeing if there is improvement
time_start_final = time.time()
list_of_parameters_after_iteration = model_multi.create_list_of_allparameters(
allparameters_parametrization_proposal_after_iteration,
list_of_defocuses=list_of_defocuses_input_long,
zmax=self.zmax)
res_multi = model_multi(
list_of_parameters_after_iteration,
return_Images=True,
use_only_chi=use_only_chi,
multi_background_factor=multi_background_factor)
if self.verbosity >= 1:
logging.info('allparameters_parametrization_proposal_after_iteration '
+ str(allparameters_parametrization_proposal_after_iteration[0:5]))
logging.info('list_of_parameters_after_iteration[0][0:5] '
+ str(list_of_parameters_after_iteration[0][0:5]))
final_model_result, list_of_final_model_result, list_of_image_final,\
list_of_finalinput_parameters, list_of_after_chi2, list_of_final_psf_positions = res_multi
# third (last?) time modifying variance image
list_of_single_model_image = list_of_image_final
list_of_var_images_via_model = []
for index_of_single_image in range(len(list_of_sci_images)):
popt = create_popt_for_custom_var(self.list_of_sci_images[index_of_single_image],
self.list_of_var_images[index_of_single_image],
self.list_of_mask_images[index_of_single_image])
single_var_image_via_model =\
create_custom_var_from_popt(list_of_single_model_image[index_of_single_image], popt)
list_of_var_images_via_model.append(single_var_image_via_model)
# replace the variance images provided with these custom variance images
list_of_var_images = list_of_var_images_via_model
# self.list_of_var_images = list_of_var_images
time_end_final = time.time()
if self.verbosity >= 1:
logging.info('Total time taken for final iteration was ' + str(time_end_final
- time_start_final)
+ ' seconds with num_iter: ' + str(num_iter))
if self.save:
np.save('/tigress/ncaplar/Results/list_of_final_model_result_'
+ str(num_iter) + '_' + str(iteration_number), list_of_final_model_result)
np.save(
'/tigress/ncaplar/Results/list_of_image_final_' + str(num_iter) + '_'
+ str(iteration_number), list_of_image_final)
np.save('/tigress/ncaplar/Results/list_of_finalinput_parameters_'
+ str(num_iter) + '_' + str(iteration_number), list_of_finalinput_parameters)
np.save('/tigress/ncaplar/Results/list_of_after_chi2_' + str(num_iter) + '_'
+ str(iteration_number), list_of_after_chi2)
np.save('/tigress/ncaplar/Results/list_of_final_psf_positions_'
+ str(num_iter) + '_' + str(iteration_number), list_of_final_psf_positions)
if self.verbosity >= 1:
            logging.info('list_of_final_psf_positions : ' + str(list_of_final_psf_positions))
##########################################################################
# divided model images by their standard deviations
list_of_image_final_std = []
for i in range(len(list_of_image_0)):
# normalizing by standard deviation image
# May 22 modification
STD = np.sqrt(list_of_var_images[i]) * array_of_std_sum[i]
image_final = list_of_image_final[i]
list_of_image_final_std.append(image_final / STD)
##########################################################################
# masked model images after this iteration (mask by flux criteria)
list_of_M_final = []
list_of_M_final_std = []
for i in range(len(list_of_image_final_std)):
image_final = list_of_image_final[i]
image_final_std = list_of_image_final_std[i]
flux_mask = list_of_flux_mask[i]
# what is list_of_mask_images?
# M_final=((image_final[flux_mask])/np.sum(image_final[flux_mask])).ravel()
M_final = (image_final[flux_mask]).ravel()
# M_final_std=((image_final_std[flux_mask])/np.sum(image_final_std[flux_mask])).ravel()
M_final_std = ((image_final_std[flux_mask]) / 1).ravel()
list_of_M_final.append(M_final)
list_of_M_final_std.append(M_final_std)
        # join all M_final, M_final_std from individual images into one uber M_final, M_final_std
uber_M_final = [item for sublist in list_of_M_final for item in sublist]
uber_M_final_std = [item for sublist in list_of_M_final_std for item in sublist]
uber_M_final = np.array(uber_M_final)
uber_M_final_std = np.array(uber_M_final_std)
uber_M_final_linear_prediction = uber_M0 + \
self.create_linear_aproximation_prediction(H, first_proposal_Tokovnin)
uber_M_final_std_linear_prediction = uber_M0_std + \
self.create_linear_aproximation_prediction(H_std, first_proposal_Tokovnin_std)
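        # The linear prediction is the first-order expansion of the model around M0,
        #   M_final ~ M0 + H @ a
        # (assuming create_linear_aproximation_prediction returns H @ a); comparing
        # it with the fully recomputed uber_M_final measures how well the
        # linearization holds over this step.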
if self.save:
np.save(
'/tigress/ncaplar/Results/uber_M_final_' + str(num_iter) + '_'
+ str(iteration_number), uber_M_final)
np.save(
'/tigress/ncaplar/Results/uber_M_final_std_' + str(num_iter) + '_'
+ str(iteration_number), uber_M_final_std)
if self.save:
np.save('/tigress/ncaplar/Results/uber_M_final_linear_prediction_'
+ str(num_iter) + '_' + str(iteration_number), uber_M_final_linear_prediction)
np.save('/tigress/ncaplar/Results/uber_M_final_std_linear_prediction_'
+ str(num_iter) + '_' + str(iteration_number), uber_M_final_std_linear_prediction)
####
        # Seeing if there is an improvement
        # Quality measure is the sum of absolute differences of uber_I_std (all images / std)
        # and uber_M_final_std (all models / std)
        # how closely is that correlated with improvements in final_model_result?
# non-std version
# not used, that is ok, we are at the moment using std version
IM_final = np.sum(np.abs(np.array(uber_I) - np.array(uber_M_final)))
# std version
IM_final_std = np.sum(np.abs(np.array(uber_I_std) - np.array(uber_M_final_std)))
# linear prediction versions
IM_final_linear_prediction = np.sum(
np.abs(np.array(uber_I) - np.array(uber_M_final_linear_prediction)))
# std version
IM_final_std_linear_prediction = np.sum(
            np.abs(np.array(uber_I_std) - np.array(uber_M_final_std_linear_prediction)))
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@email: <EMAIL>
@time: 8/13/21 5:44 PM
"""
import copy
import glob
import os
# import glob
import shutil
import numpy as np
import json
import threading
import open3d as o3
import transforms3d as t3d
from vis import draw_registration_result
class Dataset:
def __init__(self):
self.meter_2_mm = True
self.flag_show = False
self.size = -1
self.voxel_sizes = (0.1,)
        self.angles_cutoff_along = ()  # needed below; empty tuple disables cutoff
        # self.angles_cutoff_along = (0.0, )
self.plane_sizes = (0.6,)
self.Gaussian_sigma_factor = (0.02, )
self.n_move = 1
self.translation_rg_factor = (-3.1, 3.1)
self.rotation_reg = (-180.0, 180.0)
self.num_random = (100,)
# self.size = 220
# self.voxel_sizes = (0.07, 0.67)
# self.voxel_sizes = np.arange(self.voxel_sizes[0], self.voxel_sizes[1], (self.voxel_sizes[1]-self.voxel_sizes[0])/4)
#
# self.angles_cutoff_along = (0.0, 360.0)
# self.angles_cutoff_along = np.arange(self.angles_cutoff_along[0], self.angles_cutoff_along[1], 90.0)
# self.angle_cutoff = 90
#
# self.plane_sizes = (0.8, 1.7)
# self.plane_sizes = np.arange(self.plane_sizes[0], self.plane_sizes[1], 0.8)
#
# self.Gaussian_sigma_factor = (0.2, 1.5)
# self.Gaussian_sigma_factor = np.arange(self.Gaussian_sigma_factor[0], self.Gaussian_sigma_factor[1], 0.4)
#
# self.n_move = 6
# self.translation_rg_factor = (-2.5, 2.5)
# self.rotation_reg = (-360.0, 360.0)
# self.num_random = np.arange(50, 250, 100)
# self.instances = ['bunny', 'water_boiler', 'cisco_phone', 'red_mug_white_spots', 'strands_mounting_unit',
# 'burti', 'skull', 'yellow_toy_car', 'fruchtmolke', 'canon_camera_bag', 'dragon_recon',
# 'happy_recon', 'lucy']
# self.instances = ['bunny', 'water_boiler', 'cisco_phone', 'strands_mounting_unit',
# 'burti', 'skull', 'yellow_toy_car', 'fruchtmolke', 'canon_camera_bag', 'dragon_recon',
# 'happy_recon']
self.instances = ['human_models/head_models/model_man/']
self.instances_models = ['3D_model.pcd']
self.instances_datas = ['views/']
self.instances_2_scale = {'bunny', 'water_boiler', 'cisco_phone', 'red_mug_white_spots',
'strands_mounting_unit', 'burti', 'skull', 'yellow_toy_car', 'fruchtmolke',
'canon_camera_bag', 'dragon_recon', 'happy_recon'}
# self.instances = ['bunny', 'water_boiler']
        print('Total',
              (
                  len(self.instances) * len(self.voxel_sizes)
                  * (len(self.angles_cutoff_along) if len(self.angles_cutoff_along) > 0 else 1)
                  * (len(self.plane_sizes) if len(self.plane_sizes) > 0 else 1)
                  * (len(self.Gaussian_sigma_factor) if len(self.Gaussian_sigma_factor) > 0 else 1)
                  * self.n_move
              ), 'pc will be generated. \n',
              len(self.instances), 'instances in account.\n',
              len(self.voxel_sizes), 'voxel sizes\n',
              len(self.angles_cutoff_along), 'angles\n',
              len(self.plane_sizes), 'plane sizes\n',
              len(self.Gaussian_sigma_factor), 'noises\n',
              self.n_move, ' moves\n',
              len(self.voxel_sizes) * max(len(self.angles_cutoff_along), 1) * len(self.plane_sizes)
              * len(self.Gaussian_sigma_factor) * self.n_move, 'combo for each instance.')
self.instances_plane = {'bunny', 'water_boiler', 'cisco_phone', 'red_mug_white_spots',
'strands_mounting_unit', 'burti', 'skull', 'yellow_toy_car', 'fruchtmolke',
'canon_camera_bag', 'dragon_recon', 'happy_recon'}
self.data_info = {'pc_model': None, 'pc_from': None, 'pc_artificial': None, 'instance': None,
'scale': 1, 'unit': '', 'voxel_size': None, 'angle': None, 'pose': [], 'tf': [],
'sigma': None, 'outliers': None, 'plane': None}
# def read_instance(self, dir_path):
# self.dir_path_read = dir_path
# # instances = os.listdir(dir_path)
# instances = self.instances
# self.file_paths = [dir_path + instance + '/3D_model.pcd' for instance in instances]
class ExeThread(threading.Thread):
def __init__(self, thread_id, func, args):
threading.Thread.__init__(self)
self.thread_id = thread_id
self.func = func
self.args = args
def run(self):
print(self.thread_id, '::Creating artificial point cloud start')
self.func(*self.args)
print(self.thread_id, '::Creating artificial point cloud done')
class Writer(Dataset):
def __init__(self):
Dataset.__init__(self)
self.model_file_paths = []
self.datas_file_paths = []
self.poses_file_paths = []
self.filename_len = 6
self.meter_2_mm = True
self.relative_path = False
def write(self, sample_dir_path, output_dir_path, json_path, num_thread=4):
# setup output path and file
if not os.path.isdir(output_dir_path):
os.makedirs(output_dir_path)
else:
shutil.rmtree(output_dir_path)
os.makedirs(output_dir_path)
# self.file_paths = [sample_dir_path + instance + '/3D_model.pcd' for instance in instances]
self.model_file_paths, self.datas_file_paths, self.poses_file_paths = [], [], []
for instance, instance_model in zip(self.instances, self.instances_models):
self.model_file_paths.append(sample_dir_path + instance + instance_model)
for instance, instance_datas in zip(self.instances, self.instances_datas):
datas_file_paths_per_instance, poses_file_paths_per_instance = [], []
for instance_path in glob.glob(sample_dir_path + instance + instance_datas + '*.ply'):
datas_file_paths_per_instance.append(instance_path)
for instance_path in glob.glob(sample_dir_path + instance + instance_datas + '*.pcd'):
datas_file_paths_per_instance.append(instance_path)
# find txt per data file
for data_file_paths_per_instance in datas_file_paths_per_instance:
poses_file_paths_per_instance.append(data_file_paths_per_instance[:-3] + 'txt')
self.datas_file_paths.append(datas_file_paths_per_instance)
self.poses_file_paths.append(poses_file_paths_per_instance)
"""why brother registering parameters to build artificial data layer after layer,
why not build the point cloud in place. Think about how many point cloud will be here after 5 different
downsampling, 6 different cutoff, 4 different plane added, 8 different noise level, it will blow up memory"""
# register sample pcs and artificial info
sources = self.__reg_pc(self.model_file_paths, self.datas_file_paths, self.poses_file_paths, self.instances)
# peek model data
print('Get model point cloud for')
for i, source in enumerate(sources):
print(i, source['instance'])
# register artificial pc parameters
print('Setting up artificial point cloud for')
sources = self.__reg_down_sampling(sources)
# sources = self.__reg_cutoff(sources)
# sources = self.__reg_add_outliers(sources)
sources = self.__reg_add_plane(sources)
sources = self.__reg_add_noise(sources)
sources = self.__reg_add_pose(sources)
print('Set up artificial point cloud for')
# assign slice of sources execution for multithreading
index_sources_thread_list = [int(id_thread * len(sources) / num_thread) for id_thread in range(num_thread)]
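        # Worked example (illustrative): with len(sources) == 10 and num_thread == 4
        # this yields start indices [0, 2, 5, 7]; thread i then processes
        # sources[start_i:start_{i+1}], with the last thread taking the remainder.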
# create artificial pc and save it
# # create multithreading for pc maker and saver
exe_thread_list = []
for id_thread in range(num_thread):
index_start_sources = index_sources_thread_list[id_thread]
index_end_sources = index_sources_thread_list[id_thread+1] if id_thread < num_thread-1 else len(sources)
exe_thread_list.append(ExeThread(thread_id=id_thread, func=self.__exe_all, args=(sources, output_dir_path,
index_start_sources,
index_end_sources)))
# # start multithreading for pc maker and saver
for id_thread in range(num_thread):
exe_thread_list[id_thread].start()
# # make sure main thread wait for multithreading for pc maker and saver
for id_thread in range(num_thread):
exe_thread_list[id_thread].join()
# make json file to retrieve data
sources_record = sources # self.__format_json(sources)
with open(json_path, 'w') as f:
json.dump(sources_record, f)
def __exe_all(self, sources, output_dir_path, index_start_sources=0, index_end_sources=-1):
# create artificial pc and save it
instance_cur = None
for index_source, source in enumerate(sources[index_start_sources:index_end_sources]):
id_pc_saving = index_start_sources + index_source
# read pc, down sampling, and so on
self.__load_pc(source, flag_show=self.flag_show)
# output to screen
if not instance_cur or not instance_cur == source['instance']: # notice if instance change
instance_cur = source['instance']
print(' ', id_pc_saving, 'iter: Working on', source['instance'], source['pc_artificial'], ' range from',
source['pc_artificial'].get_min_bound(), 'to', source['pc_artificial'].get_max_bound())
self.__exe_down_sampling(source, flag_show=self.flag_show)
self.__exe_cutoff(source, flag_show=self.flag_show)
# self.__exe_add_outliers(source, flag_show=False)
self.__exe_add_plane(source, flag_show=self.flag_show)
self.__exe_add_noise(source, flag_show=self.flag_show)
self.__exe_add_pose(source, flag_show=self.flag_show)
self.__save_pc(output_dir_path, index=id_pc_saving, source=source)
def __reg_pc(self, model_file_paths, datas_file_paths, poses_file_paths, instances):
sources = []
for model_file_path, datas_file_path, pose_file_paths, instance in zip(model_file_paths, datas_file_paths, poses_file_paths, instances):
for data_file_path, pose_file_path in zip(datas_file_path, pose_file_paths):
tf_init = np.eye(4)
pose_model_in_target = np.loadtxt(pose_file_path)
                assert pose_model_in_target.shape == (4, 4), 'Expect pose matrix to be 4x4, but get ' + str(pose_model_in_target.shape) + ' instead'
# transform the point cloud to upright to add plane
if 'bunny' in data_file_path:
tf_init[:3, :3] = t3d.euler.euler2mat(*np.deg2rad([90.0, 0.0, 0.0]))
# tf[:3, 3] = 0
elif 'skull' in data_file_path:
tf_init[:3, :3] = t3d.euler.euler2mat(*np.deg2rad([0.0, 90.0, 0.0]))
tf_init[:3, 3] = 0
elif 'burti' in data_file_path:
tf_init[:3, :3] = t3d.euler.euler2mat(*np.deg2rad([90.0, 0.0, 0.0]))
tf_init[:3, 3] = 0
elif 'dragon_recon' in data_file_path:
tf_init[:3, :3] = t3d.euler.euler2mat(*np.deg2rad([90.0, 0.0, 0.0]))
tf_init[:3, 3] = 0
elif 'happy_recon' in data_file_path:
tf_init[:3, :3] = t3d.euler.euler2mat(*np.deg2rad([90.0, 0.0, 0.0]))
tf_init[:3, 3] = 0
elif 'human' in data_file_path:
tf_init[:3, :3] = t3d.euler.euler2mat(*np.deg2rad([180.0, 0.0, 0.0]))
tf_init[:3, 3] = 0
source = copy.deepcopy(self.data_info)
source['pc_model'] = model_file_path
source['pc_from'] = data_file_path
source['instance'] = instance
source['unit'] = 'mm'
source['tf'].append(tf_init)
source['pose'] = pose_model_in_target
# # # vis to confirm
# pc_src = o3.io.read_point_cloud(source['pc_model'])
# pc_tar = o3.io.read_point_cloud(source['pc_from'])
# draw_registration_result(source=pc_src, target=pc_tar, transformation=source['pose'])
sources.append(source)
# print(file_path, "\n max bounds for geometry coordinates", pc.get_max_bound())
return sources
def __load_pc(self, source, flag_show=False):
tf_init = source['tf'][0]
file_path = source['pc_from']
pc = o3.io.read_point_cloud(filename=file_path)
# normalization: scale up or down according to diagonal range to a size
if source['instance'] in self.instances_2_scale:
bound_min, bound_max = pc.get_min_bound(), pc.get_max_bound()
size_origin = np.linalg.norm(bound_max - bound_min) # here we use second order norm of range in every direction
scale_factor = self.size / size_origin
if self.size == -1:
scale_factor = 1
source['scale'] = scale_factor
else:
source['scale'] = 1
source['pc_artificial'] = pc
pc.scale(scale=source['scale'], center=pc.get_center())
pc.transform(tf_init)
if flag_show:
o3.visualization.draw_geometries([pc], window_name='Read ' + str(source['instance']))
def __save_pc(self, save_dir, index, source):
filename_save = str(index) + '.pcd'
filename_save = '0' * (self.filename_len - len(str(index))) + filename_save
o3.io.write_point_cloud(save_dir + filename_save, source['pc_artificial'])
del source['pc_artificial'] # get rid of the point cloud to save memory
source['pc_artificial'] = save_dir + filename_save # record the artificial pc saving address
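        # Design note: swapping the in-memory point cloud for its on-disk path keeps
        # the metadata JSON-serializable and bounds peak memory to roughly one
        # artificial cloud per worker thread.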
def __reg_down_sampling(self, sources):
sources_processed = []
for source in sources:
for voxel_size in self.voxel_sizes:
source_ = copy.deepcopy(source)
source_['voxel_size'] = voxel_size
sources_processed.append(source_)
print('Down sampling', self.voxel_sizes)
assert len(sources_processed) / len(sources) == len(self.voxel_sizes), str(len(sources_processed)) + ' ' + str(
len(sources)) + ' ' + str(len(self.voxel_sizes))
return sources_processed
def __exe_down_sampling(self, source, flag_show=True):
pc = source['pc_artificial']
voxel_size = source['voxel_size']
source['pc_artificial'] = pc.voxel_down_sample(voxel_size)
if flag_show:
o3.visualization.draw_geometries([pc], window_name='Initial Setup down to ' + str(voxel_size))
def __reg_cutoff(self, sources, flag_show=True):
if len(self.angles_cutoff_along) == 0:
return sources
sources_processed = []
for source in sources:
for angle_cutoff_along in self.angles_cutoff_along:
source_ = copy.deepcopy(source)
source_['angle'] = angle_cutoff_along
sources_processed.append(source_)
assert len(sources_processed) / len(sources) == len(self.angles_cutoff_along), str(
len(sources_processed)) + ' ' + str(len(sources)) + ' ' + str(len(self.angles_cutoff_along))
print('Cut off ', self.angles_cutoff_along)
return sources_processed
def __exe_cutoff(self, source, flag_show=True):
        if source['angle'] is None:  # note: 0.0 is a valid cutoff angle, so test for None explicitly
return
angle_cutoff_along = np.deg2rad(source['angle'])
pc = source['pc_artificial']
tp = np.asarray(pc.points)
model_center = np.mean(tp, axis=0)
tp = tp - model_center
normal = np.array([-np.sin(angle_cutoff_along), np.cos(angle_cutoff_along), 0.0]).T
# plane_vector = np.array([np.cos(angle_cutoff_along), np.sin(angle_cutoff_along), 0.0]).T
# plane_vector = normal
mask = np.dot(tp, normal)
mask = mask < 0
tp = tp[mask, :]
pc.points = o3.utility.Vector3dVector(tp)
if flag_show:
o3.visualization.draw_geometries([pc], window_name='Cutoff at ' + str(angle_cutoff_along))
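    # Minimal sketch of the half-space cut used above (assumed points shape (N, 3),
    # theta = the cutoff angle in radians):
    #   normal = np.array([-np.sin(theta), np.cos(theta), 0.0])
    #   kept = centered_points[centered_points @ normal < 0]
    # i.e. only points on the negative side of the vertical plane through the
    # centroid are retained.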
def __reg_add_pose(self, sources):
sources_processed = []
for source in sources:
for i in range(self.n_move):
source_ = copy.deepcopy(source)
translation = ((self.translation_rg_factor[1] - self.translation_rg_factor[0]) * np.random.random(3) +
self.translation_rg_factor[0])
orientation = (self.rotation_reg[1] - self.rotation_reg[0]) * np.random.random((3, 1)) + \
self.rotation_reg[0]
tf_random = np.identity(4)
tf_random[:3, :3] = t3d.euler.euler2mat(*np.deg2rad(orientation))
tf_random[:3, 3] = translation
source_['tf'].append(tf_random)
sources_processed.append(source_)
print('# of Pose', self.n_move)
return sources_processed
def __exe_add_pose(self, source, flag_show=False):
pc = source['pc_artificial']
pc_init = copy.deepcopy(pc) if flag_show else None
# change the pose according to model size
tf_random = source['tf'][-1]
tp = np.asarray(pc.points)
rg = tp.max(axis=0) - tp.min(axis=0) # range
tf_random[:3, 3] = rg * tf_random[:3, 3] # rearrange translation
source['tf'][-1] = tf_random
del tf_random
pc.transform(source['tf'][-1])
# accumulate transformations to make final pose
        assert len(source['tf']) == 2, 'Expect 2 transformations, but got ' + str(len(source['tf']))
tf_final = np.eye(4)
for tf_ in source['tf']:
tf_final = np.matmul(tf_, tf_final)
source['pose'] = np.matmul(tf_final, source['pose'])
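        # Homogeneous 4x4 transforms compose by left-multiplication: applying tf1
        # and then tf2 equals (tf2 @ tf1), which is what the loop above accumulates
        # before folding tf_final into the stored ground-truth pose.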
# # # vis to confirm
# pc_src = o3.io.read_point_cloud(source['pc_model'])
# pc_tar = source['pc_artificial']
# draw_registration_result(source=pc_src, target=pc_tar, transformation=source['pose'])
source['tf'] = [tf_.tolist() for tf_ in source['tf']]
source['pose'] = source['pose'].tolist()
if flag_show:
# pc_model = o3.io.read_point_cloud(source['pc_model'])
o3.visualization.draw_geometries([pc_init, pc], window_name='Move at ' + str(tf_final))
# def __reg_add_outliers(self, sources):
# sources_processed = []
# for source in sources:
# for n_random in self.num_random:
# source_ = copy.deepcopy(source)
# source_['outliers'] = n_random
# sources_processed.append(source_)
# assert len(sources_processed) / len(sources) == len(self.num_random), str(len(sources_processed)) + ' ' + str(
# len(sources)) + ' ' + str(len(self.num_random))
# print('# of outliers', self.num_random)
# return sources_processed
#
# def __exe_add_outliers(self, source, flag_show=True):
# pc = source['pc_artificial']
# tp = np.asarray(pc.points)
# """setup outliers"""
# rg = 1.5 * (tp.max(axis=0) - tp.min(axis=0)) # range
# n_random = source['outliers']
# rands = (np.random.rand(n_random, 3) - 0.5) * rg + tp.mean(axis=0)
# pc.points = o3.utility.Vector3dVector(np.r_[tp, rands])
# if flag_show:
# o3.visualization.draw_geometries([pc], window_name='Initial Setup add ' + str(n_random) + ' outliers')
def __reg_add_plane(self, sources):
sources_processed = []
for source in sources:
for plane_size in self.plane_sizes:
source_ = copy.deepcopy(source)
source_['plane'] = plane_size
sources_processed.append(source_)
print('Adding planes', self.plane_sizes)
return sources_processed
def __exe_add_plane(self, source, flag_show=True):
# if source['instance'] not in self.instances_plane: return
plane_axis = (0, 1)
plane_normal_axis = 2
dis_nearest_neighbor = source['voxel_size']
plane_size = source['plane']
pc = source['pc_artificial']
tp = np.asarray(pc.points)
# dis_nearest_neighbor = min(np.linalg.norm(tp - tp[0, :], axis=1)[2:])
rg = 1.5 * (tp.max(axis=0) - tp.min(axis=0)) # range
# add a plane underneath the model
# dis_nearest_neighbor = dis_nearest_neighbor / plane_size
nx = int(plane_size * rg[plane_axis[0]] / dis_nearest_neighbor)
ny = int(plane_size * rg[plane_axis[1]] / dis_nearest_neighbor)
x = np.linspace(-plane_size * rg[plane_axis[0]], rg[plane_axis[0]] * plane_size, nx)
y = np.linspace(-plane_size * rg[plane_axis[1]], rg[plane_axis[1]] * plane_size, ny)
x, y = np.meshgrid(x, y)
# make a empty shadow
mask = np.logical_or(y < - rg[plane_axis[0]] / 8, np.logical_or(x < - rg[plane_axis[0]] / 4, x > rg[plane_axis[0]] / 4))
x, y = x[mask], y[mask]
z = np.zeros(y.shape) + tp.min(axis=0)[plane_normal_axis]
if 'model_women' in source['instance']:
z -= 135
        plane = np.stack([x, y, z])
        # NOTE: the source record was truncated here. A plausible completion
        # (assumption) merges the plane points into the cloud, e.g.:
        #   pc.points = o3.utility.Vector3dVector(np.r_[tp, plane.T])
from typing import Iterator, List
from amuse.datamodel.particles import Particle, Particles
from amuse.lab import units
from amuse.units.quantities import ScalarQuantity
from astropy.io import fits
from astropy.io.fits.hdu.table import BinTableHDU
import pandas
import numpy as np
class Snapshot:
fields = {
'x': units.kpc, 'y': units.kpc, 'z': units.kpc,
'vx': units.kms, 'vy': units.kms, 'vz': units.kms,
'mass': units.MSun,
'is_barion': None
}
def __init__(self,
particles: Particles = Particles(),
timestamp: ScalarQuantity = 0 | units.Myr
):
self.particles = particles
self.timestamp = timestamp
def __getitem__(self, value) -> 'Snapshot':
return Snapshot(self.particles[value], self.timestamp)
def __add__(self, other: 'Snapshot') -> 'Snapshot':
if self.timestamp == other.timestamp:
particles = Particles()
particles.add_particles(self.particles)
particles.add_particles(other.particles)
return Snapshot(particles, self.timestamp)
else:
raise RuntimeError('Tried to sum snapshots with different timestamps.')
def add(self, other: 'Snapshot', ignore_timestamp = False):
'''
Adds other snapshot to this one. If ignore_timestamps is False,
does not change timestamp. Otherwise RuntimeError would be thrown if
timestamps are different.
'''
if not ignore_timestamp and (self.timestamp != other.timestamp):
raise RuntimeError('Tried to sum snapshots with different timestamps.')
self.particles.add_particles(other.particles)
@staticmethod
def file_info(filename: str) -> int:
'''
Returns number of snapshots in the FITS file.
'''
hdul = fits.open(filename, memmap = True)
number_of_snaps = len(hdul) - 1
hdul.close()
return number_of_snaps
def to_fits(self, filename: str, append: bool = False):
cols = []
for (key, val) in Snapshot.fields.items():
array = getattr(self.particles, key)
fmt = 'L'
if val is not None:
array = array.value_in(val)
fmt = 'E'
col = fits.Column(
name = key,
unit = str(Snapshot.fields[key]),
format = fmt,
array = array
)
cols.append(col)
cols = fits.ColDefs(cols)
hdu = fits.BinTableHDU.from_columns(cols)
hdu.header['TIME'] = self.timestamp.value_in(units.Myr)
if append:
try:
fits.append(filename, hdu.data, hdu.header)
            except Exception:
hdu.writeto(filename, overwrite = True)
else:
hdu.writeto(filename, overwrite = True)
@staticmethod
def from_fits(filename: str) -> Iterator['Snapshot']:
hdul = fits.open(filename, memmap = True)
        snapshot = Snapshot(Particles(), 0 | units.Myr)
for frame in range(len(hdul) - 1):
table: BinTableHDU = hdul[frame + 1]
number_of_particles = len(table.data[list(Snapshot.fields.keys())[0]])
snapshot.timestamp = table.header['TIME'] | units.Myr
snapshot.particles = Particles(number_of_particles)
for (key, val) in Snapshot.fields.items():
if val is not None:
setattr(snapshot.particles, key, table.data[key] | val)
else:
data = np.array(table.data[key], dtype = np.float64)
setattr(snapshot.particles, key, data)
yield snapshot
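    # Minimal usage sketch ('simulation.fits' is a hypothetical file name):
    #   for snapshot in Snapshot.from_fits('simulation.fits'):
    #       print(snapshot.timestamp, len(snapshot.particles))
    # from_fits is a generator, so frames are materialized one at a time.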
@staticmethod
def from_fits_frame(filename: str, frame: int = 0) -> 'Snapshot':
hdul = fits.open(filename, memmap = True)
        snapshot = Snapshot(Particles(), 0 | units.Myr)
table: BinTableHDU = hdul[frame + 1]
snapshot.timestamp = table.header['TIME'] | units.Myr
number_of_particles = len(table.data[list(Snapshot.fields.keys())[0]])
snapshot.particles = Particles(number_of_particles)
for (key, val) in Snapshot.fields.items():
setattr(snapshot.particles, key, table.data[key] | val)
return snapshot
@staticmethod
def from_csv(filename: str, delimiter: str = ',') -> 'Snapshot':
table = pandas.read_csv(filename, delimiter = delimiter, index_col = False)
        table['barion'] = table['barion'].map({'True': True, 'False': False})
particles = Particles(len(table.iloc[:, 0]))
particles.x = np.array(table['x']) | units.kpc
particles.y = np.array(table['y']) | units.kpc
particles.z = np.array(table['z']) | units.kpc
particles.vx = np.array(table['vx']) | units.kms
particles.vy = np.array(table['vy']) | units.kms
particles.vz = np.array(table['vz']) | units.kms
        particles.mass = np.array(table['m']) | units.MSun
        # NOTE: the source record was truncated after this line. A plausible
        # completion (assumption), matching Snapshot.fields, would be:
        #   particles.is_barion = np.array(table['barion'])
        #   return Snapshot(particles)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import os
import pytest
import numpy as np
from matrixprofile.algorithms.top_k_discords import top_k_discords
def test_mp_all_same():
profile = {
'mp': np.ones(10),
'ez': 2,
'w': 4,
'class': 'MatrixProfile'
}
discords = top_k_discords(profile)['discords']
desired = np.array([9, 6, 3])
np.testing.assert_almost_equal(discords, desired)
def test_discords_no_exclusion():
profile = {
'mp': np.array([1, 2, 3, 4]),
'w': 4,
'class': 'MatrixProfile'
}
desired = np.array([3, 2, 1])
discords = top_k_discords(profile, k=3, exclusion_zone=0)['discords']
np.testing.assert_almost_equal(discords, desired)
def test_discords_no_exclusion_all():
profile = {
        'mp': np.array([1, 2, 3, 4]),
        'w': 4,
        'class': 'MatrixProfile'
    }
    # NOTE: the source record was truncated inside the profile dict. The completion
    # below is an assumption modeled on test_discords_no_exclusion, extended to all
    # four discords.
    desired = np.array([3, 2, 1, 0])
    discords = top_k_discords(profile, k=4, exclusion_zone=0)['discords']
    np.testing.assert_almost_equal(discords, desired)
#!/usr/bin/env python3
# Copyright 2021 <NAME>, <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import os
from networks.tpdi_networks import DFCNetwork
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
from matplotlib.colors import ListedColormap
from matplotlib.ticker import FormatStrFormatter
mpl.rcParams['mathtext.fontset'] = 'stix'
mpl.rcParams['font.family'] = 'STIXGeneral'
out_dir = './logs/toy_experiments/fig2'
if not os.path.exists(out_dir):
os.makedirs(out_dir)
np.random.seed(42)
torch.manual_seed(42)
n_in=5
n_hidden=[2]
n_out=2
nb_Q = 2000
nb_J_damped = 100
fit_on = 'total' # 'J', 'total', 'Q'
def rescale(matrix, scale=1.):
matrix_magnitude = np.linalg.norm(matrix)
return scale/matrix_magnitude * matrix
def all_positive_eig(A):
lamb = np.linalg.eigvals(A)
return sum(lamb.real<0) == 0
def all_negative_eig(A):
lamb = np.linalg.eigvals(A)
return sum(lamb.real>0) == 0
def generate_random_Q(jac):
while True:
permutation = np.random.randn(n_out,n_out)
Q_rand = np.matmul(jac.T, permutation)
if all_positive_eig(np.matmul(jac, Q_rand)):
return rescale(Q_rand.flatten())
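# generate_random_Q rejection-samples feedback matrices of the form Q = J^T P,
# with P a random (n_out x n_out) matrix, until no eigenvalue of J @ Q has a
# negative real part; the accepted Q is returned flattened and rescaled to unit norm.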
def compute_damped_jac(jac, damping):
curv = np.matmul(jac, jac.T)
return rescale(np.matmul(jac.T,
np.linalg.inv(curv + damping * np.eye(curv.shape[0]))).flatten())
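# compute_damped_jac returns the rescaled, flattened damped pseudoinverse
#   J^T @ inv(J @ J^T + damping * I),
# which (for full row-rank J) tends to the Moore-Penrose pseudoinverse as
# damping -> 0 and to a rescaled transpose J^T as damping -> infinity; e.g.
# (illustrative claim, not executed):
#   np.allclose(compute_damped_jac(jac, 1e-12),
#               rescale(np.linalg.pinv(jac).flatten()), atol=1e-6)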
net = DFCNetwork(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
activation='tanh', initialization='xavier_normal')
x = torch.randn((1, n_in))
net.forward(x)
jac = net.compute_full_jacobian(linear=True).squeeze(0).numpy()
Qs_vectorized = np.zeros((nb_Q, jac.size))
for i in range(nb_Q):
Qs_vectorized[i,:] = generate_random_Q(jac)
J_damped_pinv = np.zeros((nb_J_damped, jac.size))
damping_values = np.logspace(-5, 2, num=nb_J_damped)
for i, damping in enumerate(damping_values):
J_damped_pinv[i, :] = compute_damped_jac(jac, damping)
J_pinv = np.expand_dims(rescale(np.linalg.pinv(jac).flatten()), 0)
J_trans = np.expand_dims(rescale(jac.T.flatten()), 0)
QJ_combined = np.concatenate((Qs_vectorized, J_damped_pinv, J_pinv, J_trans), axis=0)
pca = PCA(n_components=2)
if fit_on == 'Q':
pca.fit(Qs_vectorized)
elif fit_on == 'J':
pca.fit(J_damped_pinv)
elif fit_on == 'total':
pca.fit(QJ_combined)
Qs_pca = pca.transform(Qs_vectorized)
J_damped_pinv_pca = pca.transform(J_damped_pinv)
J_pinv_pca = pca.transform(J_pinv)
J_trans_pca = pca.transform(J_trans)
min_x_axis = np.min([np.min(Qs_pca[:, 0]), np.min(J_damped_pinv_pca[:,0]), np.min(J_pinv_pca[:,0]), np.min(J_trans_pca[:,0])])
min_y_axis = np.min([np.min(Qs_pca[:, 1]), np.min(J_damped_pinv_pca[:,1]), np.min(J_pinv_pca[:,1]), np.min(J_trans_pca[:,1])])
import random
import imgaug
import torch
import numpy as np
from utils.utils import model_dict,model_dict_proba_list
seed = 1024
random.seed(seed)
imgaug.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def main2():
global model_dict_proba_list
test_results_list = []
for model_name, checkpoint_path in model_dict:
test_results = np.load(
"../user_data/temp_data/{}.npy".format(checkpoint_path), allow_pickle=True
)
'''
min = np.min(test_results, axis=1)
min = np.expand_dims(min, axis=1)
sigma = np.std(test_results, axis=1)
sigma = np.expand_dims(sigma, axis=1)
test_results = (test_results - min) / sigma
'''
test_results_list.append(test_results)
test_results_list = np.array(test_results_list)
print(test_results_list.shape)
model_dict_proba_list = np.array(model_dict_proba_list)
#print(model_dict_proba_list.shape)
model_dict_proba_list = np.transpose(model_dict_proba_list, (1,0))
print(model_dict_proba_list.shape)
test_results_list = np.transpose(test_results_list,(1,0,2))
tmp_test_result_list = np.multiply(test_results_list,model_dict_proba_list)
print(tmp_test_result_list.shape)
tmp_test_result_list = np.sum(tmp_test_result_list, axis=1)
print(tmp_test_result_list.shape)
tmp_test_result_list = np.argmax(tmp_test_result_list, axis=1)
tmp_test_result_list = tmp_test_result_list.reshape(-1)
return tmp_test_result_list
def gen_csv():
import pandas as pd
import sys
sys.path.append("..")
# from emotion2.preprocess import EMOTION_DICT
EMOTION_DICT = {
0: "angry",
1: "disgusted",
2: "fearful",
3: "happy",
4: "sad",
5: "surprised",
6: "neutral",
}
res = main2()
res_labels = []
    mapping = np.loadtxt("mapping.txt", dtype=int)
'''
Authors: <NAME>, <NAME>, <NAME>
Email ID: <EMAIL>, <EMAIL>, <EMAIL>
'''
import keras
import tensorflow as tf
from keras.models import Sequential
from keras.models import Model
#from tensorflow.keras import layers
#from tensorflow.keras import optimizers
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Activation
from sklearn.neighbors import KernelDensity
from keras.layers import Masking
from keras.layers import Input
from keras.layers import Concatenate
from keras import optimizers
from scipy.stats import spearmanr
from scipy import stats
from statistics import mean
import copy
import mlflow
import seaborn as sns
import numpy as np
from matplotlib import pyplot as plt
from numpy import genfromtxt
from sklearn.utils import shuffle
import csv
import random
import math
import sklearn
import mlflow
import mlflow.keras
from sklearn.metrics import mean_squared_error
from matplotlib import pyplot as plt
import os
import glob
import multiprocessing as mp
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
import matplotlib.cm
import matplotlib
import argparse
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.svm import SVR
from sklearn.kernel_ridge import KernelRidge
from xgboost import XGBRegressor
from sklearn.neighbors import RadiusNeighborsRegressor
from xgboost import XGBRFRegressor
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
numLatency = 118
embeddingsFile = "onnxEmbeddings.csv"
lat = []
maxVal = 0
matplotlib.use('Agg')
def parse_latency(file):
global lat
data = np.genfromtxt(file, delimiter=',')
latency = np.mean(data, axis=1)
latency = latency[:numLatency]
lat.append(latency)
#latency = latency/np.amax(latency)
return latency
def parse_features():
Features = []
maxLayer = 0
maxFlops = 0
maxChannel = 0
maxDim = 224
maxKernel = 7
maxPadding = 3
with open(embeddingsFile, newline='') as f:
reader = csv.reader(f)
data = list(reader)
for i in range(len(data)):
temp = [data[i][j * 13:(j + 1) * 13] for j in range((len(data[i]) + 12) // 13 )]
maxLayer = max(maxLayer, len(temp))
for j in range(len(temp)):
maxFlops=max(maxFlops, float(temp[j][12]))
maxChannel = max(maxChannel, int(temp[j][7]))
maxChannel = max(maxChannel, int(temp[j][8]))
Features.append(temp)
numpyFeatures = np.ones((len(Features), maxLayer, 13))
numpyFeatures = numpyFeatures*-1
for i in range(len(Features)):
temp = Features[i]
for j in range(len(temp)):
for k in range(len(temp[j])):
numpyFeatures[i][j][k] = temp[j][k]
if k == 5 or k == 6:
numpyFeatures[i][j][k] = numpyFeatures[i][j][k]/maxDim
elif k == 7 or k == 8:
numpyFeatures[i][j][k] = numpyFeatures[i][j][k]/maxChannel
elif k == 9:
numpyFeatures[i][j][k] = numpyFeatures[i][j][k]/maxKernel
elif k == 12:
numpyFeatures[i][j][k] = numpyFeatures[i][j][k]/maxFlops
return numpyFeatures, maxLayer
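# Feature layout inferred from the normalization above (13 values per layer):
# indices 5-6 are spatial dimensions (scaled by maxDim=224), 7-8 are channel
# counts (scaled by the observed maxChannel), 9 is kernel size (scaled by
# maxKernel=7), and 12 is FLOPs (scaled by the observed maxFlops). Unused
# layer slots are padded with -1 so the LSTM's Masking layer can skip them.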
def learn_xgb_model(hardware, maxLayer, lat_mean, features, featuresShape, splitPercentage=0.99, shuffleFeatures=True):
numSample = len(lat_mean)
features = features[:numSample]
if shuffleFeatures == True:
features, lat_mean = shuffle(features,lat_mean)
trainf = features[:int(splitPercentage*len(features))]
trainy = lat_mean[:int(splitPercentage*len(features))]
testf = features[int(splitPercentage*len(features)):]
testy = lat_mean[int(splitPercentage*len(features)):]
print("================= Dataset Stage ==============")
print(trainf.shape, trainy.shape, testf.shape, testy.shape)
trainf = np.reshape(trainf, (trainf.shape[0], trainf.shape[1]*trainf.shape[2]))
testf = np.reshape(testf, (testf.shape[0], testf.shape[1]*testf.shape[2]))
model = XGBRegressor()
model.fit(trainf, trainy)
trainPredict = model.predict(trainf)
testPredict = model.predict(testf)
trainScore = math.sqrt(mean_squared_error(trainy, trainPredict))
testScore = math.sqrt(mean_squared_error(testy, testPredict))
### Train Model characteristics
r2_score = sklearn.metrics.r2_score(trainy, trainPredict)
s_coefficient, pvalue = spearmanr(trainy, trainPredict)
writeToFile('Train Score: %f RMSE' % (trainScore))
writeToFile("The R^2 Value for %s: %f"%(hardware, r2_score))
writeToFile("The Spearnman Coefficient and p-value for %s: %f and %f"%(hardware, s_coefficient, pvalue))
plt.figure()
plt.xlabel("Actual Latency (in ms)")
plt.ylabel("Predicted Latency (in ms)")
sns.scatterplot(trainy, trainPredict)
plt.savefig(args.name+'/plots/'+hardware+'_'+args.learning_type+'_'+str(splitPercentage)+'_train.png')
r2_score = sklearn.metrics.r2_score(testy, testPredict)
s_coefficient, pvalue = spearmanr(testy, testPredict)
writeToFile('Test Score: %f RMSE' % (testScore))
writeToFile("The R^2 Value for %s: %f"%(hardware, r2_score))
writeToFile("The Spearnman Coefficient and p-value for %s: %f and %f"%(hardware, s_coefficient, pvalue))
plt.figure()
plt.xlabel("Actual Latency (in ms)")
plt.ylabel("Predicted Latency (in ms)")
sns.scatterplot(testy, testPredict)
plt.savefig(args.name+'/plots/'+hardware+"_"+args.learning_type+'_'+str(1-splitPercentage)+'_test.png')
return model
def learn_xgb_model_collab(hardware, maxLayer, lat_mean, features, featuresShape, splitPercentage=0.99, shuffleFeatures=True):
    print('Learning ' + hardware)
numSample = len(lat_mean)
features = features[:numSample]
if shuffleFeatures == True:
features, lat_mean = shuffle(features,lat_mean)
testf = features
testy = lat_mean
testf = np.reshape(testf, (testf.shape[0], testf.shape[1]*testf.shape[2]))
results = []
index = []
for i in range(10, numSample):
trainf = features[:i]
trainy = lat_mean[:i]
# print("================= Dataset Stage ==============")
# print(trainf.shape, trainy.shape, testf.shape, testy.shape)
trainf = np.reshape(trainf, (trainf.shape[0], trainf.shape[1]*trainf.shape[2]))
model = XGBRegressor()
model.fit(trainf, trainy)
testPredict = model.predict(testf)
testScore = math.sqrt(mean_squared_error(testy, testPredict))
r2_score = sklearn.metrics.r2_score(testy, testPredict)
s_coefficient, pvalue = spearmanr(testy, testPredict)
results.append(r2_score)
index.append(i)
matplotlib.rcParams['figure.dpi'] = 500
plt.figure()
plt.xlabel("Number of Datapoints")
plt.ylabel("Average R^2")
sns.lineplot(index, results)
plt.savefig(args.name+'/plots/'+hardware+'_indiLearn.png')
    f = open(args.name+'/meta/plotdata.txt', 'a')
s1 = ','.join(map(str, index))
s2 = ','.join(map(str, results))
f.write(hardware+'\n'+s1+'\n'+s2+'\n')
f.close()
def learn_lstm_model(hardware, maxLayer, lat_mean, features, featuresShape):
numSample = len(lat_mean)
features = features[:numSample]
features, lat_mean = shuffle(features,lat_mean)
trainf = features[:int(0.99*len(features))]
trainy = lat_mean[:int(0.99*len(features))]
#testf = features[:int(1.0*len(features))]
#testy = lat_mean[:int(1.0*len(features))]
testf = features[int(0.99*len(features)):]
testy = lat_mean[int(0.99*len(features)):]
print("================= Dataset Stage ==============")
print(trainf.shape, trainy.shape, testf.shape, testy.shape)
#mlflow.keras.autolog()
#Create an LSTM model
model=Sequential()
model.add(Masking(mask_value=-1,input_shape=(maxLayer, featuresShape)))
model.add(LSTM(20, activation='relu'))
model.add(Dense(1, name = 'fc'))
opt = optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, amsgrad=False)
#initial_learning_rate = 0.01
# lr_schedule = optimizers.schedules.ExponentialDecay(initial_learning_rate,
#opt = optimizers.SGD(learning_rate = initial_learning_rate)
model.compile(loss='mean_squared_error', optimizer=opt, metrics=[keras.metrics.MeanAbsolutePercentageError()])
model.summary()
#filepath="checkpoint-{loss:.5f}-{val_loss:.5f}-{val_mean_absolute_percentage_error}.hdf5"
filepath=args.name+'/models/model.hdf5'
    #checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')# monitor can be val_loss or loss
    checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')# monitor can be val_loss or loss
es = EarlyStopping(monitor='loss', mode='min', verbose=1, patience=50)
val = model.fit(trainf, trainy, epochs=250, batch_size=512, verbose=1, callbacks=[es, checkpoint])
#val = model.fit(trainf, trainy, epochs=250, batch_size=512, verbose=1, callbacks=[es, checkpoint], validation_data=(testf, testy))
model.load_weights(filepath)
trainPredict = model.predict(trainf)
testPredict = model.predict(testf)
trainScore = math.sqrt(mean_squared_error(trainy, trainPredict))
testScore = math.sqrt(mean_squared_error(testy, testPredict))
### Train Model characteristics
r2_score = sklearn.metrics.r2_score(trainy, trainPredict)
s_coefficient, pvalue = spearmanr(trainy, trainPredict)
writeToFile('Train Score: %f RMSE' % (trainScore))
writeToFile("The R^2 Value for %s: %f"%(hardware, r2_score))
writeToFile("The Spearnman Coefficient and p-value for %s: %f and %f"%(hardware, s_coefficient, pvalue))
plt.figure()
plt.xlabel("Actual Latency (in ms)")
plt.ylabel("Predicted Latency (in ms)")
sns.scatterplot(trainy, trainPredict[:,0])
#plt.title(hardware+' R2: '+str(r2_score)+' SpearVal: '+str(s_coefficient))
plt.savefig(args.name+'/plots/'+hardware+"_"+args.learning_type+'_train.png')
r2_score = sklearn.metrics.r2_score(testy, testPredict)
s_coefficient, pvalue = spearmanr(testy, testPredict)
writeToFile('Test Score: %f RMSE' % (testScore))
writeToFile("The R^2 Value for %s: %f"%(hardware, r2_score))
writeToFile("The Spearnman Coefficient and p-value for %s: %f and %f"%(hardware, s_coefficient, pvalue))
plt.figure()
plt.xlabel("Actual Latency (in ms)")
plt.ylabel("Predicted Latency (in ms)")
sns.scatterplot(testy, testPredict[:,0])
#plt.title(hardware+' R2: '+str(r2_score)+' SpearVal: '+str(s_coefficient))
plt.savefig(args.name+'/plots/'+hardware+"_"+args.learning_type+'_test.png')
### Adding Other Regressors
extractor = Model(outputs=model.get_layer('fc').input, inputs=model.input)
extractor.summary()
knn = KNeighborsRegressor()
trainPredict = extractor.predict(trainf)
testPredict = extractor.predict(testf)
randForest = RandomForestRegressor()
decisionTree = DecisionTreeRegressor()
svr = SVR()
    kernel_ridge = KernelRidge()
    xgb = XGBRegressor()
    xgbrf = XGBRFRegressor()
    modellist = [ ('knn', knn), ('randomForest', randForest), ('dTree', decisionTree), ('svr', svr), ('kernelridge', kernel_ridge), ('xgb', xgb), ('xgbrf', xgbrf) ]
for name, model_lowB in modellist:
model_lowB.fit(trainPredict, trainy)
modeltestPred = model_lowB.predict(testPredict)
testScore = math.sqrt(mean_squared_error(testy, modeltestPred))
r2_score = sklearn.metrics.r2_score(testy, modeltestPred)
s_coefficient, pvalue = spearmanr(testy, modeltestPred)
writeToFile('Test Score with %s : %f RMSE' % (name, testScore))
writeToFile("The R^2 Value with %s for %s: %f"%(hardware, name, r2_score))
writeToFile("The Spearnman Coefficient and p-value for %s with %s : %f and %f"%(hardware, name, s_coefficient, pvalue))
plt.figure()
plt.xlabel("Actual Latency (in ms)")
plt.ylabel("Predicted Latency (in ms)")
sns.scatterplot(testy, modeltestPred)
#plt.title(name + hardware+' R2: '+str(r2_score)+' SpearVal: '+str(s_coefficient))
plt.savefig(args.name+'/plots/'+hardware+args.learning_type+'_'+name+'.png')
return (model, modellist, extractor)
'''
This function takes in the dictionary of hardware_names to its maxLayer, latency and features map
net_dict[key][2] - refers to the network features for a hardware and
net_dict[key][1] - refers to the latency for that hardware
1. First determine the mean and std of the latencies for each hardware in the dictionary
2. Sample from the distribution - i.e. from Mu-8*sigma to Mu+2*sigma, at each parts of the distribution, find all indices that intersect in all the hardwares considered here. For ex., if network no. 2374 falls between mu-1*sigma and mu for all the hardware devices in the dictionary, then add 2374 to the representation set for all the hardware
3. Find maxSamples such networks that become the golden representation of the hardware
4. Return the list of lists of maxSamples network representation for all hardwares and also the indices of the representation networks
5. The indices will be used by any hardware not on the list to make and append it's representation
TODO: Not using max samples for now - change
'''
def sample_hwrepresentation(net_dict, maxSamples):
mean_lat = []
sd_lat = []
final_indices = []
#Determining the Mean and Standard Deviation of Latencies
for key in net_dict:
        net_dict[key][2] = net_dict[key][2][:numLatency,:,:]  # not strictly required; kept as a safety truncation
net_dict[key][1] = net_dict[key][1][:numLatency]
print(np.mean(net_dict[key][1]), np.std(net_dict[key][1]))
mean_lat.append(np.mean(net_dict[key][1]))
sd_lat.append(np.std(net_dict[key][1]))
for i in range(-2,8): #This range might not be enough -- the range should be more generic when hardware increases
index_where = []
index = 0
for key in net_dict:
index_where.append(np.where(np.logical_and(net_dict[key][1] > mean_lat[index]+i*sd_lat[index], net_dict[key][1] <= mean_lat[index]+(i+1)*sd_lat[index])))
index += 1
for j in range(len(index_where)):
index_where[0] = np.intersect1d(index_where[0], index_where[j])
final_intersection = index_where[0]
if len(final_intersection) >= 4:
loop_index = 4
else:
loop_index = len(final_intersection)
hw_features_cncat = []
for j in range(loop_index):
final_indices.append(final_intersection[j])
print("The final indices size is %f"%(len(final_indices)))
for key in net_dict:
hw_features_per_device = []
for j in range(len(final_indices)):
hw_features_per_device.append(net_dict[key][1][final_indices[j]])
net_dict[key][1] = np.delete(net_dict[key][1], final_indices, axis=0)
net_dict[key][2] = np.delete(net_dict[key][2], final_indices, axis=0)
hw_features_cncat.append(hw_features_per_device)
print(len(final_indices), net_dict[key][2].shape)
return final_indices, hw_features_cncat
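# Hypothetical usage sketch, assuming net_dict maps a hardware name to
# [maxLayer, latency_array, feature_array] as elsewhere in this script:
#   final_indices, hw_reps = sample_hwrepresentation(net_dict, maxSamples=40)
#   latencies, features = append_with_net_features(net_dict, hw_reps)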
def random_indices(maxSamples):
rand_indices = []
for i in range(maxSamples):
rand_indices.append(random.randint(0,numLatency-1))
return rand_indices
'''
Function which computes total MACs of each network and samples maxSamples indices from it based on FLOPS.
'''
def flopsBasedIndices(maxSamples):
with open('../DiverseRandNetworkGenerator/Embeddings.csv') as f:
reader = csv.reader(f)
data = list(reader)
totalFLOPSList = np.zeros(len(data))
for i in range(len(data)):
temp = [data[i][j * 13:(j + 1) * 13] for j in range((len(data[i]) + 12) // 13 )]
for j in range(len(temp)):
totalFLOPSList[i]+=int(temp[j][12])
mean = np.mean(totalFLOPSList)
sd = np.std(totalFLOPSList)
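    # Hedged sketch: the original stops after computing the FLOP statistics and
    # returns nothing. One plausible completion, if the intent was to spread
    # maxSamples picks across the FLOP distribution, would be:
    #   levels = np.linspace(mean - 2 * sd, mean + 2 * sd, maxSamples)
    #   return [int(np.argmin(np.abs(totalFLOPSList - b))) for b in levels]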
def random_sampling(net_dict, rand_indices, maxSamples):
for key in net_dict:
net_dict[key][2] = net_dict[key][2][:numLatency,:,:]
net_dict[key][1] = net_dict[key][1][:numLatency]
hw_features_cncat = []
#rand_indices = []
#final_indices = []
#for i in range(maxSamples):
# rand_indices.append(random.randint(0,5000))
for key in net_dict:
hw_features_per_device = []
for j in range(maxSamples):
hw_features_per_device.append(net_dict[key][1][rand_indices[j]])
hw_features_cncat.append(hw_features_per_device)
#If this is not done separately, the code will break
for key in net_dict:
net_dict[key][1] = np.delete(net_dict[key][1], rand_indices, axis=0)
net_dict[key][2] = np.delete(net_dict[key][2], rand_indices, axis=0)
return hw_features_cncat
'''
Append the hardware representation with the available network representation in axis = 2 (3rd dimension)
and also append all the hardwares together along axis = 0 (row dimension) to form a huge training set of multiple
hardware devices
'''
def append_with_net_features(net_dict, hw_features_cncat):
new_lat_ft = []
appended_features = []
appended_latencies = []
index = 0
for key in net_dict:
print("======================================================")
print(len(hw_features_cncat[index]))
new_lat_ft = np.tile(hw_features_cncat[index], (net_dict[key][2].shape[0], net_dict[key][2].shape[1], 1))
temp = np.concatenate((net_dict[key][2], new_lat_ft), axis=2)
print(new_lat_ft.shape, net_dict[key][2].shape, temp.shape)
if index == 0:
appended_features = temp
appended_latencies = net_dict[key][1]
else:
appended_features = np.concatenate((appended_features, temp), axis=0)
appended_latencies = np.concatenate((appended_latencies, net_dict[key][1]), axis=0)
index += 1
print(appended_features.shape, appended_latencies.shape)
#print(appended_features, appended_latencies)
return appended_latencies, appended_features
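# Shape sketch with hypothetical numbers: 3 devices, 100 remaining networks,
# maxLayer=40, 13 base features and 10 sampled latencies per device give a
# per-device feature block of (100, 40, 23); stacking along axis 0 yields
# features of shape (300, 40, 23) and latencies of shape (300,).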
def corr_choose(rho, maxSamples, threshold = 0.97, stop_condition = 5, debug=True):
elements = [numLatency]
subset = []
indices = range(rho.shape[0])
if debug:
print("Before start : Number of remaining vectors", rho.shape[0])
for i in range(maxSamples):
# add_ = np.argmax(np.sum(rho, axis=1))
add_ = np.argmax(np.sum(rho > threshold, axis=1))
subset += [indices[add_]]
remove_set = []
for j in range(rho.shape[0]):
if rho[j, add_] > threshold:
remove_set += [j]
rho = np.delete(rho, remove_set, axis=0)
rho = np.delete(rho, remove_set, axis=1)
indices = np.delete(indices, remove_set)
elements.append(rho.shape[0])
if debug:
print('Iteration', i, ": Number of remaining vectors", rho.shape[0])
if len(indices) <= stop_condition:
break
if debug:
print('Chosen networks are ', subset)
matplotlib.rcParams['figure.dpi'] = 500
plt.figure()
plt.xlabel('Iterations')
plt.ylabel('Number of Networks remaining')
# plt.title('Mutual Information Score over iterations')
plt.plot(np.arange(len(elements)), elements,'-o')
plt.savefig(args.name+'/plots/spearman.png')
return subset
def corr_eval(rho, subset, threshold = 0.97):
count_close = 0
for i in range(rho.shape[0]):
if i in subset:
count_close += 1
continue
max_ = 0
for j in subset:
max_ = max(rho[i, j], max_)
if max_ > threshold:
count_close += 1
return count_close/rho.shape[0]
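# corr_choose greedily keeps the vector correlated above `threshold` with the
# most remaining vectors, removes that whole cluster, and repeats; corr_eval
# then reports the fraction of all vectors within `threshold` correlation of
# at least one chosen representative (1.0 means full coverage).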
def spearmanCorr(net_dict, numSamples):
index = 0
global lat
ll = np.array(lat)
for key in net_dict:
net_dict[key][2] = net_dict[key][2][:numLatency, :, :]
net_dict[key][1] = net_dict[key][1][:numLatency]
for key in net_dict:
if index == 0:
stacked_arr = net_dict[key][1]
else:
stacked_arr = np.column_stack((stacked_arr, net_dict[key][1]))
index+=1
rho, p = spearmanr(ll)
print(rho)
print(rho.shape)
sel_list = corr_choose(rho, numSamples, 0.98)
print('Evaluation scores is', corr_eval(rho, sel_list, 0.98))
#exit(0)
hw_features_cncat = []
for key in net_dict:
hw_features_per_device = []
for j in range(len(sel_list)):
hw_features_per_device.append(net_dict[key][1][sel_list[j]])
hw_features_cncat.append(hw_features_per_device)
#If this is not done separately, the code will break
for key in net_dict:
net_dict[key][1] = np.delete(net_dict[key][1], sel_list, axis=0)
net_dict[key][2] = np.delete(net_dict[key][2], sel_list, axis=0)
return sel_list, hw_features_cncat
def pearsonCorr(net_dict, numSamples):
index = 0
global lat
ll = np.array(lat)
for key in net_dict:
net_dict[key][2] = net_dict[key][2][:numLatency, :, :]
net_dict[key][1] = net_dict[key][1][:numLatency]
for key in net_dict:
if index == 0:
stacked_arr = net_dict[key][1]
else:
stacked_arr = np.column_stack((stacked_arr, net_dict[key][1]))
index+=1
rho = np.corrcoef(ll)
print(rho)
print(rho.shape)
sel_list = corr_choose(rho, numSamples, 0.98)
print('Evaluation scores is', corr_eval(rho, sel_list, 0.98))
#exit(0)
hw_features_cncat = []
for key in net_dict:
hw_features_per_device = []
for j in range(len(sel_list)):
hw_features_per_device.append(net_dict[key][1][sel_list[j]])
hw_features_cncat.append(hw_features_per_device)
#If this is not done separately, the code will break
for key in net_dict:
net_dict[key][1] = np.delete(net_dict[key][1], sel_list, axis=0)
net_dict[key][2] = np.delete(net_dict[key][2], sel_list, axis=0)
return sel_list, hw_features_cncat
def KL(a, b):
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
return np.sum(np.where(a != 0, a * np.log(a / b), 0))
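# Caveat: this KL estimate assumes a and b are strictly positive density
# values on a shared support; zeros in b produce divide-by-zero warnings and
# non-finite values, and the inputs are not renormalized here.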
def chooseFirstNetMI(data):
kde = np.ones_like(data)
print(data.shape)
for i in range(data.shape[0]):
a = data[i].reshape(-1,1)
# print(a.shape)
k = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(a)
kde[i] = k.score_samples(a) #sample(a.shape[0])
kde[i] = np.exp(kde[i])
print(kde.shape)
meanval = np.mean(kde, axis=0)
print(meanval.shape)
print(meanval)
maxval = -10000000
maxindex = 0
for i in range(kde.shape[0]):
val = KL(meanval, kde[i])
print(val)
if val >= maxval:
maxval = val
maxindex = i
return maxindex
## Mutual Information Implementation
def mutual_information_v2(net_dict, numSamples, choose_minimal=True):
index = 0
## Rows - Networks, Columns - Hardware
for key in net_dict:
net_dict[key][2] = net_dict[key][2][:numLatency,:,:]
net_dict[key][1] = net_dict[key][1][:numLatency]
for key in net_dict:
if index == 0:
stacked_arr = net_dict[key][1]
else:
stacked_arr = np.column_stack((stacked_arr, net_dict[key][1]))
index+=1
quantize = np.arange(0, 101, 33)
nlevels = len(quantize)
print(stacked_arr.shape)
nrows = stacked_arr.shape[0]
ncols = stacked_arr.shape[1]
for i in range(nrows):
a_ = stacked_arr[i, :]
p = np.percentile(a_, quantize)
bins = np.digitize(a_, p)
stacked_arr[i, :] = bins - 1
# print(stacked_arr[0:5,:])
# exit()
#val = np.random.randint(0, nrows)
#val = select_network()
val = chooseFirstNetMI(stacked_arr)
sel_list = [val]
hw_features_cncat = []
max_info_lst = []
print( " ------------------------------------- Beginning Sampling -------------------")
for k in range(numSamples-1):
max_info = 0
for i in range(nrows):
if i in sel_list:
continue
m = -1*mutual_info(stacked_arr, sel_list + [i], nrows, ncols)
if m >= max_info:
max_index = i
max_info = m
max_info_lst.append(max_info)
sel_list = sel_list + [max_index]
print(" ------------------------------- Done Sampling -----------------------------", len(sel_list))
matplotlib.rcParams['figure.dpi'] = 500
plt.figure()
plt.xlabel('Iterations')
plt.ylabel('Mutual Information Score')
# plt.title('Mutual Information Score over iterations')
plt.plot(np.arange(len(max_info_lst)), max_info_lst,'-o')
plt.savefig(args.name+'/plots/mutual_info_score.png')
print(max_info_lst)
print(sel_list)
if choose_minimal == True:
out_index = len(max_info_lst)
epsilon = 0.05
for i in range(1, len(max_info_lst)):
val = max_info_lst[i] - max_info_lst[i-1]
if val < epsilon:
out_index = i
break
print(out_index)
sel_list = sel_list[:out_index]
print(sel_list)
#exit(0)
for key in net_dict:
hw_features_per_device = []
for j in range(len(sel_list)):
hw_features_per_device.append(net_dict[key][1][sel_list[j]])
hw_features_cncat.append(hw_features_per_device)
#If this is not done separately, the code will break
for key in net_dict:
net_dict[key][1] = np.delete(net_dict[key][1], sel_list, axis=0)
net_dict[key][2] = np.delete(net_dict[key][2], sel_list, axis=0)
return sel_list, hw_features_cncat
def mutual_info(arr, row_list, nrows, ncols):
    arr_temp = arr[row_list, :]
    t = tuple(arr_temp[i, :] for i in np.arange(len(row_list) - 1, -1, -1))
    inds = np.lexsort(t)
    a_sorted = arr_temp[:, inds]
    # Joint entropy of the selected rows, estimated from runs of identical
    # columns in the lexsorted matrix.
    mutual_info = 0
    k = 0
    for i in range(1, ncols):
        k += 1
        if not np.array_equal(a_sorted[:,i-1], a_sorted[:,i]):
            mutual_info -= (k/ncols)*np.log(k/ncols)
            k = 0
    k += 1  # flush the trailing run (dropped in the original, biasing the estimate low)
    mutual_info -= (k/ncols)*np.log(k/ncols)
    # Marginal entropy of the newly added row, estimated the same way.
    a_sorted = np.sort(a_sorted[-1, :])
    self_info = 0
    k = 0
    for i in range(1, ncols):
        k += 1
        if not a_sorted[i] == a_sorted[i-1]:
            self_info -= (k/ncols)*np.log(k/ncols)
            k = 0
    k += 1  # flush the trailing run here as well
    self_info -= (k/ncols)*np.log(k/ncols)
    # print(row_list[-1], self_info, mutual_info, self_info-mutual_info)
    return self_info - mutual_info
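# Worked example (hypothetical): with two quantized latency rows
#   arr = np.array([[0, 0, 1, 1],
#                   [0, 1, 0, 1]])
# mutual_info(arr, [0, 1], 2, 4) estimates the joint entropy of both rows from
# runs of identical columns, subtracts it from the marginal entropy of the
# last row, and returns H(new) - H(joint). The caller negates this, which (up
# to the constant H(selected)) minimizes the mutual information between the
# candidate and the rows already selected, i.e. it prefers the least
# redundant network.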
def learn_individual_models(list_val_dict, splitPercentage=0.99, shuffleFeatures=True):
global maxVal
## Identifying the max latency
for key in list_val_dict:
maxValTemp = np.amax(list_val_dict[key][1])
if maxValTemp > maxVal:
maxVal = maxValTemp
##Normalizing the latency by the maxlatency
for key in list_val_dict:
list_val_dict[key][1] = list_val_dict[key][1] / maxVal
for key in list_val_dict:
if args.model == "lstm":
learn_lstm_model(key, list_val_dict[key][0], list_val_dict[key][1], list_val_dict[key][2], list_val_dict[key][2].shape[2])
elif args.model == "xgb":
# learn_xgb_model_collab(key, list_val_dict[key][0], list_val_dict[key][1], list_val_dict[key][2], list_val_dict[key][2].shape[2], splitPercentage, shuffleFeatures)
learn_xgb_model(key, list_val_dict[key][0], list_val_dict[key][1], list_val_dict[key][2], list_val_dict[key][2].shape[2], splitPercentage, shuffleFeatures)
'''
Holds out one hardware at a time and learns a combined model for the remaining hardware and tries to
predict for the held-out hardware without any fine-tuning
'''
def learn_collaborative_models(list_val_dict):
final_indices = 0
if args.sampling_type == "random":
final_indices = random_indices(args.numSamples)
splitVal = 0.2
### Take a new set and see how it works
list_val_dict_set1 = dict(list(list_val_dict.items())[int(0.9*(len(list_val_dict))):])
list_val_dict_rem = dict(list(list_val_dict.items())[:int(0.9*(len(list_val_dict)))])
## Split the hardware into a smaller and a larger set
list_val_dict_small = dict(list(list_val_dict_rem.items())[:int(splitVal*(len(list_val_dict_rem)))])
list_val_dict_large = dict(list(list_val_dict_rem.items())[int(splitVal*(len(list_val_dict_rem))):])
if args.model == "lstm":
model, modellist, extractor, final_indices, final_lat, final_features = subsetAndLearn(list_val_dict_small, final_indices, args.numSamples)
elif args.model == "xgb":
model, final_indices, final_lat, final_features = subsetAndLearn(list_val_dict_small, final_indices, args.numSamples)
####### Test Transfer for each hardware #####################
######## Transfer for the remaining held-out set #############
list_val_dict_set1, hw_features_cncat = cncatHardwareRep(list_val_dict_set1, final_indices)
final_lat_set1, final_features_set1 = append_with_net_features(list_val_dict_set1, hw_features_cncat)
if args.model == "lstm":
checkTransfer(final_lat_set1, final_features_set1, model, final_indices, modellist, extractor, hardware="Set1")
elif args.model == "xgb":
checkTransfer(final_lat_set1, final_features_set1, model, final_indices, hardware="Set1")
################### Evaluate how many network inputs are required to learn the same model ###########
#for key in list_val_dict_set1:
# for i in range
################### Fine tune the model with the new batched hardware set ###########################
################### Continue this experiment for more batches! ######################################
def cncatHardwareRep(net_dict, final_indices):
for key in net_dict:
net_dict[key][2] = net_dict[key][2][:numLatency,:,:]
net_dict[key][1] = net_dict[key][1][:numLatency]
hw_features_cncat = []
for key in net_dict:
hw_features_per_device = []
for j in range(len(final_indices)):
hw_features_per_device.append(net_dict[key][1][final_indices[j]])
hw_features_cncat.append(hw_features_per_device)
#If this is not done separately, the code will break
for key in net_dict:
net_dict[key][1] = np.delete(net_dict[key][1], final_indices, axis=0)
net_dict[key][2] = np.delete(net_dict[key][2], final_indices, axis=0)
return net_dict, hw_features_cncat
def subsetAndLearn(net_dict, final_indices, numSamples):
if args.sampling_type == 'random':
hw_features_cncat = random_sampling(net_dict, final_indices, numSamples)
elif args.sampling_type == 'statistical':
final_indices, hw_features_cncat = sample_hwrepresentation(net_dict, numSamples)
elif args.sampling_type == 'mutual_info':
final_indices, hw_features_cncat = mutual_information_v2(net_dict, numSamples, choose_minimal=False)
elif args.sampling_type == 'spearmanCorr':
final_indices, hw_features_cncat = spearmanCorr(net_dict, numSamples)
elif args.sampling_type == 'pearsonCorr':
final_indices, hw_features_cncat = pearsonCorr(net_dict, numSamples)
else:
print("Invalid --sampling_type - Fix")
exit(0)
dumpSelectedNetworks(final_indices)
final_lat, final_features = append_with_net_features(net_dict, hw_features_cncat)
print(final_lat.shape, final_features.shape)
#final_lat = final_lat / np.amax(final_lat)
#print(list_val_dict[key][0], final_lat.shape, final_features.shape)
files = glob.glob('*.txt')
hardware = 'Mixed Model'
if args.model=='lstm':
model, modellist, extractor = learn_lstm_model(hardware, net_dict[files[0]][0], final_lat, final_features, final_features.shape[2])
return model, modellist, extractor, final_indices, final_lat, final_features
elif args.model=='xgb':
model = learn_xgb_model(hardware, net_dict[files[0]][0], final_lat, final_features, final_features.shape[2])
return model, final_indices, final_lat, final_features
def checkTransfer(lat, features, model, final_indices, modellist = None, extractor = None, hardware="Mixed Model"):
global maxVal
testf = features
testy = lat
if args.model == 'lstm':
print(testf.shape, testy.shape)
testPredict = model.predict(testf)
testScore = math.sqrt(mean_squared_error(testy, testPredict))
writeToFile('Transfer Test Score: %f RMSE' % (testScore))
r2_score = sklearn.metrics.r2_score(testy, testPredict)
s_coefficient, pvalue = spearmanr(testy, testPredict)
writeToFile("The transferred R^2 Value for Held out set is: %f"%(r2_score))
writeToFile("The transferred Spearnman Coefficient and p-value for Held-out set is: %f and %f"%(s_coefficient, pvalue))
plt.figure()
plt.xlabel("Actual Latency (in ms)")
plt.ylabel("Predicted Latency (in ms)")
sns.scatterplot(testy, testPredict[:,0])
#plt.title(hold_out_key+'TPear R2:'+str(r2_score)+' TSpear R2:'+str(s_coefficient))
plt.savefig(args.name+'/plots/'+hardware+'_transferFC.png')
testPredict = extractor.predict(testf)
for name, model_lowB in modellist:
modeltestPred = model_lowB.predict(testPredict)
testScore = math.sqrt(mean_squared_error(testy, modeltestPred))
r2_score = sklearn.metrics.r2_score(testy, modeltestPred)
s_coefficient, pvalue = spearmanr(testy, modeltestPred)
writeToFile('Transfer Test Score with %s : %f RMSE' % (name, testScore))
writeToFile("Transfer The R^2 Value with %s for %s: %f"%(hardware, name, r2_score))
writeToFile("Transfer The Spearnman Coefficient and p-value for %s with %s : %f and %f"%(hardware, name, s_coefficient, pvalue))
plt.figure()
plt.xlabel("Actual Latency (in ms)")
plt.ylabel("Predicted Latency (in ms)")
sns.scatterplot(testy, modeltestPred)
#plt.title(name + hardware+' R2: '+str(r2_score)+' SpearVal: '+str(s_coefficient))
plt.savefig(args.name+'/plots/'+hardware+args.learning_type+'_'+name+'_Transfer.png')
elif args.model == 'xgb':
testf = np.reshape(testf, (testf.shape[0], testf.shape[1]*testf.shape[2]))
print(testf.shape, testy.shape)
testPredict = model.predict(testf)
testScore = math.sqrt(mean_squared_error(testy, testPredict))
writeToFile('Transfer Test Score: %f RMSE' % (testScore))
r2_score = sklearn.metrics.r2_score(testy, testPredict)
s_coefficient, pvalue = spearmanr(testy, testPredict)
writeToFile("The transferred R^2 Value for Held out set is: %f"%(r2_score))
writeToFile("The transferred Spearnman Coefficient and p-value for Held-out set is: %f and %f"%(s_coefficient, pvalue))
testyPlot = testy * maxVal
testPredictPlot = testPredict * maxVal
testPlotScore = math.sqrt(mean_squared_error(testyPlot, testPredictPlot))
writeToFile('Normalized Transfer Test Score: %f RMSE' % (testPlotScore))
np.savetxt(args.name+'/meta/'+'testy.txt', testyPlot, delimiter='\n')
np.savetxt(args.name+'/meta/'+'testPredict.txt', testPredictPlot, delimiter='\n')
plt.figure()
plt.xlabel("Actual Latency (in ms)")
plt.ylabel("Predicted Latency (in ms)")
sns.scatterplot(testy, testPredict, s=15)
#plt.title(hold_out_key+'TPear R2:'+str(r2_score)+' TSpear R2:'+str(s_coefficient))
plt.savefig(args.name+'/plots/'+hardware+'_transferFC_scaled_down.png')
matplotlib.rcParams['figure.dpi'] = 500
plt.figure()
plt.xlabel("Actual Latency (in ms)")
plt.ylabel("Predicted Latency (in ms)")
sns.scatterplot(testyPlot, testPredictPlot, s=15)
#plt.title(hold_out_key+'TPear R2:'+str(r2_score)+' TSpear R2:'+str(s_coefficient))
plt.savefig(args.name+'/plots/'+hardware+'_transferFC_Scaled_up.png')
matplotlib.rcParams['figure.dpi'] = 500
plt.figure()
plt.xlabel("Actual Latency (in ms)")
plt.ylabel("Predicted Latency (in ms)")
sns.regplot(x=testyPlot, y=testPredictPlot, scatter_kws={'s':10, 'color':'blue'})
plt.savefig(args.name+'/plots/'+hardware+'_transferFCregPlot.png')
RMSEError(testy, testPredict)
calcErrors(testy, testPredict)
def learn_combined_models(list_val_dict):
final_indices = 0
if args.sampling_type == 'random':
final_indices = random_indices(args.numSamples)
global maxVal
## Identifying the max latency
for key in list_val_dict:
maxValTemp = np.amax(list_val_dict[key][1])
if maxValTemp > maxVal:
maxVal = maxValTemp
##Normalizing the latency by the maxlatency
for key in list_val_dict:
list_val_dict[key][1] = list_val_dict[key][1] / maxVal
## Splitting the dictionary into 70% and 30%
list_val_dict_70 = dict(list(list_val_dict.items())[:int(0.7*(len(list_val_dict)))])
list_val_dict_30 = dict(list(list_val_dict.items())[int(0.7*(len(list_val_dict))):])
print(len(list_val_dict), len(list_val_dict_70), len(list_val_dict_30))
if args.model == "lstm":
model, modellist, extractor, final_indices, final_lat, final_features = subsetAndLearn(list_val_dict_70, final_indices, args.numSamples)
elif args.model == "xgb":
model, final_indices, final_lat, final_features = subsetAndLearn(list_val_dict_70, final_indices, args.numSamples)
######## Transfer for the remaining held-out set #############
list_val_dict_30, hw_features_cncat = cncatHardwareRep(list_val_dict_30, final_indices)
final_lat_30, final_features_30 = append_with_net_features(list_val_dict_30, hw_features_cncat)
if args.model == "lstm":
checkTransfer(final_lat_30, final_features_30, model, final_indices, modellist, extractor)
elif args.model == "xgb":
checkTransfer(final_lat_30, final_features_30, model, final_indices)
########## Calculate Type I and Type II errors ###################
#calcErrors(testy, testPredict)
from itertools import product
from itertools import combinations
from scipy.spatial import distance
def calcErrors(testy, testPredict):
global maxVal
testy = testy * maxVal
testPredict = testPredict * maxVal
print(testy.shape, testPredict.shape)
#print(testy, testPredict)
## testy has each hardware's latency stacked up - one after the other - first 118, second 118 and so on
hardwareRange = int(math.ceil(testy.shape[0] / (numLatency-args.numSamples)))
print(hardwareRange)
networkRange = numLatency - args.numSamples
type1ErrP = []
type2ErrP = []
for i in range(hardwareRange):
testy_hardware = testy[i*networkRange:(i+1)*networkRange]
testPredict_hardware = testPredict[i*networkRange:(i+1)*networkRange]
#print(testy_hardware.shape, testPredict_hardware.shape)
c = list(combinations(testy_hardware,2))
d = list(combinations(testPredict_hardware, 2))
#c = list(product(testy_hardware, testy_hardware))
#d = list(product(testPredict_hardware, testPredict_hardware))
#print(len(c), len(d))
#print("================================ Hardware %d =========================="%(i))
typeThres = 0.1
distance_testy = np.ones(len(c))
distance_testPredict = np.ones(len(d))
for j in range(distance_testy.shape[0]):
distance_testy[j] = distance.euclidean(c[j][0], c[j][1])
distance_testPredict[j] = distance.euclidean(d[j][0], d[j][1])
#print(distance_testy.shape, distance_testPredict.shape)
type1Err = 0
type2Err = 0
for j in range(distance_testy.shape[0]):
dev1 = (distance_testy[j] - distance_testPredict[j]) / distance_testy[j]
dev2 = (distance_testPredict[j] - distance_testy[j]) / distance_testPredict[j]
if (distance_testy[j] > distance_testPredict[j]) and (dev1 > typeThres):
type1Err += 1
elif (distance_testPredict[j] > distance_testy[j]) and (dev2 > typeThres):
type2Err +=1
#print("For Hardware %d - Type1Err Percentage: %f, Type2Err Percentage: %f, Threshold: %f"%(i,(type1Err/distance_testy.shape[0])*100,(type2Err/distance_testy.shape[0])*100, typeThres))
type1ErrP.append((type1Err/distance_testy.shape[0])*100)
type2ErrP.append((type2Err/distance_testy.shape[0])*100)
print(mean(type1ErrP), mean(type2ErrP))
writeToFile('Type-1 Error: ' +str(mean(type1ErrP)) + ' Type-2 Error: ' +str(mean(type2ErrP)))
writeToFileError(type1ErrP, type2ErrP)
def mean_absolute_percentage_error(y_true, y_pred):
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
def RMSEError(testy, testPredict):
global maxVal
testy = testy * maxVal
testPredict = testPredict * maxVal
## testy has each hardware's latency stacked up - one after the other - first 118, second 118 and so on
networkRange = numLatency - args.numSamples
hardwareRange = int(math.ceil(testy.shape[0] / networkRange))
print(hardwareRange)
l = []
for i in range(hardwareRange):
testy_hardware = testy[i*networkRange:(i+1)*networkRange]
testPredict_hardware = testPredict[i*networkRange:(i+1)*networkRange]
r2_score = sklearn.metrics.r2_score(testy_hardware, testPredict_hardware)
rmse = math.sqrt(mean_squared_error(testy_hardware, testPredict_hardware))
mape = mean_absolute_percentage_error(testy_hardware, testPredict_hardware)
avg = np.mean(testy_hardware)
med = np.median(testy_hardware)
        l.append([r2_score, rmse, np.amin(testy_hardware), mape, avg, med])  # trailing fields reconstructed; the source row was truncated after np.amin